2019-05-03 00:29:04 +02:00
|
|
|
// Copyright (c) Microsoft Corporation.
|
|
|
|
// Licensed under the MIT license.
|
|
|
|
|
|
|
|
#include "precomp.h"
|
|
|
|
|
|
|
|
#include "DxRenderer.hpp"
|
|
|
|
#include "CustomTextLayout.h"
|
|
|
|
|
|
|
|
#include "../../interactivity/win32/CustomWindowMessages.h"
|
|
|
|
#include "../../types/inc/Viewport.hpp"
|
|
|
|
#include "../../inc/unicode.hpp"
|
|
|
|
#include "../../inc/DefaultSettings.h"
|
2019-07-12 00:20:15 +02:00
|
|
|
#include <VersionHelpers.h>
|
2019-05-03 00:29:04 +02:00
|
|
|
|
2019-12-12 14:44:01 +01:00
|
|
|
#include "ScreenPixelShader.h"
|
|
|
|
#include "ScreenVertexShader.h"
|
|
|
|
#include <DirectXMath.h>
|
|
|
|
#include <d3dcompiler.h>
|
|
|
|
#include <DirectXColors.h>
|
|
|
|
|
|
|
|
using namespace DirectX;
|
|
|
|
|
2020-02-21 00:13:43 +01:00
|
|
|
// Shared reference count for the TraceLogging provider below. Every live
// DxEngine instance holds one reference; the first constructor registers the
// provider and the last destructor unregisters it (see ctor/dtor).
std::atomic<size_t> Microsoft::Console::Render::DxEngine::_tracelogCount{ 0 };

#pragma warning(suppress : 26477) // We don't control tracelogging macros
TRACELOGGING_DEFINE_PROVIDER(g_hDxRenderProvider,
                             "Microsoft.Windows.Terminal.Renderer.DirectX",
                             // {c93e739e-ae50-5a14-78e7-f171e947535d}
                             (0xc93e739e, 0xae50, 0x5a14, 0x78, 0xe7, 0xf1, 0x71, 0xe9, 0x47, 0x53, 0x5d), );
|
|
|
|
|
2019-12-12 14:44:01 +01:00
|
|
|
// Quad where we draw the terminal.
// pos is world space coordinates where origin is at the center of screen.
// tex is texel coordinates where origin is top left.
// Layout the quad as a triangle strip where the _screenQuadVertices are place like so.
// 2 0
// 3 1
struct ShaderInput
{
    XMFLOAT3 pos; // vertex position in clip space
    XMFLOAT2 tex; // texture coordinate sampled by the pixel shader
} const _screenQuadVertices[] = {
    { XMFLOAT3(1.f, 1.f, 0.f), XMFLOAT2(1.f, 0.f) },
    { XMFLOAT3(1.f, -1.f, 0.f), XMFLOAT2(1.f, 1.f) },
    { XMFLOAT3(-1.f, 1.f, 0.f), XMFLOAT2(0.f, 0.f) },
    { XMFLOAT3(-1.f, -1.f, 0.f), XMFLOAT2(0.f, 1.f) },
};

// Input-layout description matching ShaderInput above; consumed by
// CreateInputLayout in _SetupTerminalEffects to bind the quad vertices
// to the vertex shader's POSITION/TEXCOORD semantics.
D3D11_INPUT_ELEMENT_DESC _shaderInputLayout[] = {
    { "POSITION", 0, DXGI_FORMAT_R32G32B32_FLOAT, 0, 0, D3D11_INPUT_PER_VERTEX_DATA, 0 },
    { "TEXCOORD", 0, DXGI_FORMAT_R32G32_FLOAT, 0, D3D11_APPEND_ALIGNED_ELEMENT, D3D11_INPUT_PER_VERTEX_DATA, 0 }
};
|
|
|
|
|
2019-05-03 00:29:04 +02:00
|
|
|
#pragma hdrstop

// Conversion factor between typographic points and inches (72 pt = 1 in),
// used when translating font sizes to pixel measurements.
static constexpr float POINTS_PER_INCH = 72.0f;
// Monospace faces tried, in order, when the requested font cannot be found.
static constexpr std::wstring_view FALLBACK_FONT_FACES[] = { L"Consolas", L"Lucida Console", L"Courier New" };
// Locale used for text analysis when no explicit locale is available.
static constexpr std::wstring_view FALLBACK_LOCALE = L"en-us";
|
2019-05-03 00:29:04 +02:00
|
|
|
|
|
|
|
using namespace Microsoft::Console::Render;
|
|
|
|
using namespace Microsoft::Console::Types;
|
|
|
|
|
|
|
|
// Routine Description:
// - Constructs a DirectX-based renderer for console text
//   which primarily uses DirectWrite on a Direct2D surface
#pragma warning(suppress : 26455)
// TODO GH 2683: The default constructor should not throw.
DxEngine::DxEngine() :
    RenderEngineBase(),
    _invalidateFullRows{ true },
    _invalidMap{},
    _invalidScroll{},
    _allInvalid{ false },
    _firstFrame{ true },
    _presentParams{ 0 },
    _presentReady{ false },
    _presentScroll{ 0 },
    _presentDirty{ 0 },
    _presentOffset{ 0 },
    _isEnabled{ false },
    _isPainting{ false },
    _displaySizePixels{},
    _foregroundColor{ 0 },
    _backgroundColor{ 0 },
    _selectionBackground{},
    _glyphCell{},
    _boxDrawingEffect{},
    _haveDeviceResources{ false },
    _swapChainDesc{ 0 },
    _swapChainFrameLatencyWaitableObject{ INVALID_HANDLE_VALUE },
    _recreateDeviceRequested{ false },
    _retroTerminalEffects{ false },
    _forceFullRepaintRendering{ false },
    _softwareRendering{ false },
    _antialiasingMode{ D2D1_TEXT_ANTIALIAS_MODE_GRAYSCALE },
    _defaultTextBackgroundOpacity{ 1.0f },
    _hwndTarget{ static_cast<HWND>(INVALID_HANDLE_VALUE) },
    _sizeTarget{},
    _dpi{ USER_DEFAULT_SCREEN_DPI },
    _scale{ 1.0f },
    _prevScale{ 1.0f },
    _chainMode{ SwapChainMode::ForComposition },
    _customLayout{},
    _customRenderer{ ::Microsoft::WRL::Make<CustomTextRenderer>() },
    _drawingContext{}
{
    // The first engine constructed registers the shared TraceLogging
    // provider; subsequent engines just bump the reference count.
    const auto was = _tracelogCount.fetch_add(1);
    if (0 == was)
    {
        TraceLoggingRegister(g_hDxRenderProvider);
    }

    // Create the device-independent D2D and DWrite factories up front.
    // Device-dependent resources are created lazily (see _haveDeviceResources).
    THROW_IF_FAILED(D2D1CreateFactory(D2D1_FACTORY_TYPE_SINGLE_THREADED, IID_PPV_ARGS(&_d2dFactory)));

    THROW_IF_FAILED(DWriteCreateFactory(
        DWRITE_FACTORY_TYPE_SHARED,
        __uuidof(_dwriteFactory),
        reinterpret_cast<IUnknown**>(_dwriteFactory.GetAddressOf())));

    // Initialize our default selection color to DEFAULT_FOREGROUND, but make
    // sure to set it to a D2D1::ColorF
    SetSelectionBackground(DEFAULT_FOREGROUND);
}
|
|
|
|
|
|
|
|
// Routine Description:
|
|
|
|
// - Destroys an instance of the DirectX rendering engine
|
|
|
|
DxEngine::~DxEngine()
|
|
|
|
{
|
|
|
|
_ReleaseDeviceResources();
|
2020-02-21 00:13:43 +01:00
|
|
|
|
|
|
|
const auto was = _tracelogCount.fetch_sub(1);
|
|
|
|
if (1 == was)
|
|
|
|
{
|
|
|
|
TraceLoggingUnregister(g_hDxRenderProvider);
|
|
|
|
}
|
2019-05-03 00:29:04 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
// Routine Description:
// - Sets this engine to enabled allowing painting and presentation to occur
// Arguments:
// - <none>
// Return Value:
// - Generally S_OK, but might return a DirectX or memory error if
//   resources need to be created or adjusted when enabling to prepare for draw
//   Can give invalid state if you enable an enabled class.
[[nodiscard]] HRESULT DxEngine::Enable() noexcept
{
    // Delegates to the shared enable/disable helper with "enabled" = true.
    return _EnableDisplayAccess(true);
}
|
|
|
|
|
|
|
|
// Routine Description:
// - Sets this engine to disabled to prevent painting and presentation from occurring
// Arguments:
// - <none>
// Return Value:
// - Should be OK. We might close/free resources, but that shouldn't error.
//   Can give invalid state if you disable a disabled class.
[[nodiscard]] HRESULT DxEngine::Disable() noexcept
{
    // Delegates to the shared enable/disable helper with "enabled" = false.
    return _EnableDisplayAccess(false);
}
|
|
|
|
|
|
|
|
// Routine Description:
|
|
|
|
// - Helper to enable/disable painting/display access/presentation in a unified
|
|
|
|
// manner between enable/disable functions.
|
|
|
|
// Arguments:
|
|
|
|
// - outputEnabled - true to enable, false to disable
|
|
|
|
// Return Value:
|
|
|
|
// - Generally OK. Can return invalid state if you set to the state that is already
|
|
|
|
// active (enabling enabled, disabling disabled).
|
2019-06-11 22:27:09 +02:00
|
|
|
[[nodiscard]] HRESULT DxEngine::_EnableDisplayAccess(const bool outputEnabled) noexcept
|
2019-05-03 00:29:04 +02:00
|
|
|
{
|
|
|
|
// Invalid state if we're setting it to the same as what we already have.
|
|
|
|
RETURN_HR_IF(E_NOT_VALID_STATE, outputEnabled == _isEnabled);
|
|
|
|
|
|
|
|
_isEnabled = outputEnabled;
|
2019-06-11 22:27:09 +02:00
|
|
|
if (!_isEnabled)
|
|
|
|
{
|
2019-05-03 00:29:04 +02:00
|
|
|
_ReleaseDeviceResources();
|
|
|
|
}
|
|
|
|
|
|
|
|
return S_OK;
|
|
|
|
}
|
|
|
|
|
2019-12-12 14:44:01 +01:00
|
|
|
// Routine Description:
|
|
|
|
// - Compiles a shader source into binary blob.
|
|
|
|
// Arguments:
|
|
|
|
// - source - Shader source
|
|
|
|
// - target - What kind of shader this is
|
|
|
|
// - entry - Entry function of shader
|
|
|
|
// Return Value:
|
|
|
|
// - Compiled binary. Errors are thrown and logged.
|
|
|
|
inline Microsoft::WRL::ComPtr<ID3DBlob>
|
|
|
|
_CompileShader(
|
|
|
|
std::string source,
|
|
|
|
std::string target,
|
|
|
|
std::string entry = "main")
|
|
|
|
{
|
2020-01-23 01:42:56 +01:00
|
|
|
#ifdef __INSIDE_WINDOWS
|
|
|
|
THROW_HR(E_UNEXPECTED);
|
|
|
|
return 0;
|
|
|
|
#else
|
2019-12-12 14:44:01 +01:00
|
|
|
Microsoft::WRL::ComPtr<ID3DBlob> code{};
|
|
|
|
Microsoft::WRL::ComPtr<ID3DBlob> error{};
|
|
|
|
|
|
|
|
const HRESULT hr = D3DCompile(
|
|
|
|
source.c_str(),
|
|
|
|
source.size(),
|
|
|
|
nullptr,
|
|
|
|
nullptr,
|
|
|
|
nullptr,
|
|
|
|
entry.c_str(),
|
|
|
|
target.c_str(),
|
|
|
|
0,
|
|
|
|
0,
|
|
|
|
&code,
|
|
|
|
&error);
|
|
|
|
|
|
|
|
if (FAILED(hr))
|
|
|
|
{
|
|
|
|
LOG_HR_MSG(hr, "D3DCompile failed with %x.", static_cast<int>(hr));
|
|
|
|
if (error)
|
|
|
|
{
|
|
|
|
LOG_HR_MSG(hr, "D3DCompile error\n%*S", static_cast<int>(error->GetBufferSize()), static_cast<PWCHAR>(error->GetBufferPointer()));
|
|
|
|
}
|
|
|
|
|
|
|
|
THROW_HR(hr);
|
|
|
|
}
|
|
|
|
|
|
|
|
return code;
|
2020-01-23 01:42:56 +01:00
|
|
|
#endif
|
2019-12-12 14:44:01 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
// Routine Description:
// - Setup D3D objects for doing shader things for terminal effects
//   (e.g. the retro terminal pixel shader). Creates the render target,
//   framebuffer capture texture, shaders, vertex buffer, constant buffer,
//   and sampler state used by the post-processing pass.
// Arguments:
// - <none>
// Return Value:
// - HRESULT status.
HRESULT DxEngine::_SetupTerminalEffects()
{
    // Grab the swap chain's back buffer so we can render to it and describe it.
    ::Microsoft::WRL::ComPtr<ID3D11Texture2D> swapBuffer;
    RETURN_IF_FAILED(_dxgiSwapChain->GetBuffer(0, __uuidof(ID3D11Texture2D), (LPVOID*)&swapBuffer));

    // Setup render target.
    RETURN_IF_FAILED(_d3dDevice->CreateRenderTargetView(swapBuffer.Get(), nullptr, &_renderTargetView));

    // Setup _framebufferCapture, to where we'll copy current frame when rendering effects.
    // Mirror the back buffer's description, but also allow binding as a shader resource
    // so the pixel shader can sample the captured frame.
    D3D11_TEXTURE2D_DESC framebufferCaptureDesc{};
    swapBuffer->GetDesc(&framebufferCaptureDesc);
    WI_SetFlag(framebufferCaptureDesc.BindFlags, D3D11_BIND_SHADER_RESOURCE);
    RETURN_IF_FAILED(_d3dDevice->CreateTexture2D(&framebufferCaptureDesc, nullptr, &_framebufferCapture));

    // Setup the viewport to cover the whole display area in pixels.
    D3D11_VIEWPORT vp;
    vp.Width = _displaySizePixels.width<float>();
    vp.Height = _displaySizePixels.height<float>();
    vp.MinDepth = 0.0f;
    vp.MaxDepth = 1.0f;
    vp.TopLeftX = 0;
    vp.TopLeftY = 0;
    _d3dDeviceContext->RSSetViewports(1, &vp);

    // Prepare shaders: compiled at runtime from the embedded source strings.
    auto vertexBlob = _CompileShader(screenVertexShaderString, "vs_5_0");
    auto pixelBlob = _CompileShader(screenPixelShaderString, "ps_5_0");
    // TODO:GH#3928 move the shader files to hlsl files and package their
    // build output to UWP app and load with these.
    // ::Microsoft::WRL::ComPtr<ID3DBlob> vertexBlob, pixelBlob;
    // RETURN_IF_FAILED(D3DReadFileToBlob(L"ScreenVertexShader.cso", &vertexBlob));
    // RETURN_IF_FAILED(D3DReadFileToBlob(L"ScreenPixelShader.cso", &pixelBlob));

    RETURN_IF_FAILED(_d3dDevice->CreateVertexShader(
        vertexBlob->GetBufferPointer(),
        vertexBlob->GetBufferSize(),
        nullptr,
        &_vertexShader));

    RETURN_IF_FAILED(_d3dDevice->CreatePixelShader(
        pixelBlob->GetBufferPointer(),
        pixelBlob->GetBufferSize(),
        nullptr,
        &_pixelShader));

    // Bind the file-scope _shaderInputLayout (POSITION/TEXCOORD) against the
    // compiled vertex shader's input signature.
    RETURN_IF_FAILED(_d3dDevice->CreateInputLayout(
        static_cast<const D3D11_INPUT_ELEMENT_DESC*>(_shaderInputLayout),
        ARRAYSIZE(_shaderInputLayout),
        vertexBlob->GetBufferPointer(),
        vertexBlob->GetBufferSize(),
        &_vertexLayout));

    // Create vertex buffer for screen quad (the file-scope _screenQuadVertices
    // triangle strip covering the whole screen).
    D3D11_BUFFER_DESC bd{};
    bd.Usage = D3D11_USAGE_DEFAULT;
    bd.ByteWidth = sizeof(ShaderInput) * ARRAYSIZE(_screenQuadVertices);
    bd.BindFlags = D3D11_BIND_VERTEX_BUFFER;
    bd.CPUAccessFlags = 0;

    D3D11_SUBRESOURCE_DATA InitData{};
    InitData.pSysMem = static_cast<const void*>(_screenQuadVertices);

    RETURN_IF_FAILED(_d3dDevice->CreateBuffer(&bd, &InitData, &_screenQuadVertexBuffer));

    // Constant buffer carrying the pixel-shader settings (e.g. scan line scale).
    D3D11_BUFFER_DESC pixelShaderSettingsBufferDesc{};
    pixelShaderSettingsBufferDesc.Usage = D3D11_USAGE_DEFAULT;
    pixelShaderSettingsBufferDesc.ByteWidth = sizeof(_pixelShaderSettings);
    pixelShaderSettingsBufferDesc.BindFlags = D3D11_BIND_CONSTANT_BUFFER;

    // Fill _pixelShaderSettings before we hand it to the initial buffer data.
    _ComputePixelShaderSettings();

    D3D11_SUBRESOURCE_DATA pixelShaderSettingsInitData{};
    pixelShaderSettingsInitData.pSysMem = &_pixelShaderSettings;

    RETURN_IF_FAILED(_d3dDevice->CreateBuffer(&pixelShaderSettingsBufferDesc, &pixelShaderSettingsInitData, &_pixelShaderSettingsBuffer));

    // Sampler state is needed to use texture as input to shader.
    // CLAMP addressing avoids wrap-around sampling artifacts at the edges.
    D3D11_SAMPLER_DESC samplerDesc{};
    samplerDesc.Filter = D3D11_FILTER_MIN_MAG_MIP_LINEAR;
    samplerDesc.AddressU = D3D11_TEXTURE_ADDRESS_CLAMP;
    samplerDesc.AddressV = D3D11_TEXTURE_ADDRESS_CLAMP;
    samplerDesc.AddressW = D3D11_TEXTURE_ADDRESS_CLAMP;
    samplerDesc.MipLODBias = 0.0f;
    samplerDesc.MaxAnisotropy = 1;
    samplerDesc.ComparisonFunc = D3D11_COMPARISON_ALWAYS;
    samplerDesc.BorderColor[0] = 0;
    samplerDesc.BorderColor[1] = 0;
    samplerDesc.BorderColor[2] = 0;
    samplerDesc.BorderColor[3] = 0;
    samplerDesc.MinLOD = 0;
    samplerDesc.MaxLOD = D3D11_FLOAT32_MAX;

    // Create the texture sampler state.
    RETURN_IF_FAILED(_d3dDevice->CreateSamplerState(&samplerDesc, &_samplerState));

    return S_OK;
}
|
|
|
|
|
Scale retro terminal scan lines (#4716)
<!-- Enter a brief description/summary of your PR here. What does it fix/what does it change/how was it tested (even manually, if necessary)? -->
## Summary of the Pull Request
- Scale the retro terminal effects (#3468) scan lines with the screen's DPI.
- Remove artifacts from sampling wrap around.
Before & after, with my display scale set to 350%:
![Scaling scan lines](https://user-images.githubusercontent.com/38924837/75214566-df0f4780-5742-11ea-9bdc-3430eb24ccca.png)
Before & after showing artifact removal, with my display scale set to 100%, and image enlarged to 400%:
![Sampling artifacts annotated](https://user-images.githubusercontent.com/38924837/75214618-05cd7e00-5743-11ea-9060-f4eba257ea56.png)
<!-- Please review the items on the PR checklist before submitting-->
## PR Checklist
* [x] Closes #4362
* [x] CLA signed. If not, go over [here](https://cla.opensource.microsoft.com/microsoft/Terminal) and sign the CLA
* [ ] Tests added/passed
* [ ] Requires documentation to be updated
* [ ] I've discussed this with core contributors already. If not checked, I'm ready to accept this work might be rejected in favor of a different grand plan. Issue number where discussion took place: #xxx
<!-- Provide a more detailed description of the PR, other things fixed or any additional comments/features here -->
## Detailed Description of the Pull Request / Additional comments
Adds a constant buffer, which could be used for other settings for the retro terminal pixel shader.
I haven't touched C++ in over a decade before this change, and this is the first time I've played with DirectX, so please assume my code isn't exactly best practice. 🙂
<!-- Describe how you validated the behavior. Add automated tests wherever possible, but list manual validation steps taken as well -->
## Validation Steps Performed
- Changed display scale with experimental.retroTerminalEffect enabled, enjoyed scan lines on high resolution monitors.
- Enabled experimental.retroTerminalEffect, turned the setting off, changed display scale. Retro tabs still scale scan lines.
2020-02-26 01:08:45 +01:00
|
|
|
// Routine Description:
// - Puts the correct values in _pixelShaderSettings, so the struct can be
//   passed to the GPU (as the pixel shader's constant buffer).
// Arguments:
// - <none>
// Return Value:
// - <none>
void DxEngine::_ComputePixelShaderSettings() noexcept
{
    // Retro scan lines alternate every pixel row at 100% scaling; multiply by
    // the display scale so the period stays one physical "line" at any DPI.
    _pixelShaderSettings.ScaledScanLinePeriod = _scale * 1.0f;

    // Gaussian distribution sigma used for blurring, likewise scaled with DPI
    // so the blur radius looks the same at every display scale.
    _pixelShaderSettings.ScaledGaussianSigma = _scale * 2.0f;
}
|
|
|
|
|
2019-05-03 00:29:04 +02:00
|
|
|
// Routine Description:
// - Creates device-specific resources required for drawing
//   which generally means those that are represented on the GPU and can
//   vary based on the monitor, display adapter, etc.
// - These may need to be recreated during the course of painting a frame
//   should something about that hardware pipeline change.
// - Will free device resources that already existed as first operation.
// Arguments:
// - createSwapChain - If true, we create the entire rendering pipeline
//                   - If false, we just set up the adapter.
// Return Value:
// - Could be any DirectX/D3D/D2D/DXGI/DWrite error or memory issue.
[[nodiscard]] HRESULT DxEngine::_CreateDeviceResources(const bool createSwapChain) noexcept
try
{
    if (_haveDeviceResources)
    {
        _ReleaseDeviceResources();
    }

    // If anything below fails, tear down whatever partially-built pipeline we have.
    // Released at the bottom once every step has succeeded.
    auto freeOnFail = wil::scope_exit([&]() noexcept { _ReleaseDeviceResources(); });

    RETURN_IF_FAILED(CreateDXGIFactory1(IID_PPV_ARGS(&_dxgiFactory2)));

    const DWORD DeviceFlags = D3D11_CREATE_DEVICE_BGRA_SUPPORT |
                              // clang-format off
// This causes problems for folks who do not have the whole DirectX SDK installed
// when they try to run the rest of the project in debug mode.
// As such, I'm leaving this flag here for people doing DX-specific work to toggle it
// only when they need it and shutting it off otherwise.
// Find out more about the debug layer here:
// https://docs.microsoft.com/en-us/windows/desktop/direct3d11/overviews-direct3d-11-devices-layers
// You can find out how to install it here:
// https://docs.microsoft.com/en-us/windows/uwp/gaming/use-the-directx-runtime-and-visual-studio-graphics-diagnostic-features
                              // clang-format on
                              // D3D11_CREATE_DEVICE_DEBUG |
                              D3D11_CREATE_DEVICE_SINGLETHREADED;

    const std::array<D3D_FEATURE_LEVEL, 5> FeatureLevels{ D3D_FEATURE_LEVEL_11_1,
                                                          D3D_FEATURE_LEVEL_11_0,
                                                          D3D_FEATURE_LEVEL_10_1,
                                                          D3D_FEATURE_LEVEL_10_0,
                                                          D3D_FEATURE_LEVEL_9_1 };

    // Trying hardware first for maximum performance, then trying WARP (software) renderer second
    // in case we're running inside a downlevel VM where hardware passthrough isn't enabled like
    // for Windows 7 in a VM.
    HRESULT hardwareResult = E_NOT_SET;

    // If we're not forcing software rendering, try hardware first.
    // Otherwise, let the error state fall down and create with the software renderer directly.
    if (!_softwareRendering)
    {
        hardwareResult = D3D11CreateDevice(nullptr,
                                           D3D_DRIVER_TYPE_HARDWARE,
                                           nullptr,
                                           DeviceFlags,
                                           FeatureLevels.data(),
                                           gsl::narrow_cast<UINT>(FeatureLevels.size()),
                                           D3D11_SDK_VERSION,
                                           &_d3dDevice,
                                           nullptr,
                                           &_d3dDeviceContext);
    }

    if (FAILED(hardwareResult))
    {
        RETURN_IF_FAILED(D3D11CreateDevice(nullptr,
                                           D3D_DRIVER_TYPE_WARP,
                                           nullptr,
                                           DeviceFlags,
                                           FeatureLevels.data(),
                                           gsl::narrow_cast<UINT>(FeatureLevels.size()),
                                           D3D11_SDK_VERSION,
                                           &_d3dDevice,
                                           nullptr,
                                           &_d3dDeviceContext));
    }

    _displaySizePixels = _GetClientSize();

    // Get the other device types so we have deeper access to more functionality
    // in our pipeline than by just walking straight from the D3D device.

    RETURN_IF_FAILED(_d3dDevice.As(&_dxgiDevice));
    RETURN_IF_FAILED(_d2dFactory->CreateDevice(_dxgiDevice.Get(), _d2dDevice.ReleaseAndGetAddressOf()));

    // Create a device context out of it (supersedes render targets)
    RETURN_IF_FAILED(_d2dDevice->CreateDeviceContext(D2D1_DEVICE_CONTEXT_OPTIONS_NONE, &_d2dDeviceContext));

    if (createSwapChain)
    {
        _swapChainDesc = { 0 };
        _swapChainDesc.Flags = 0;

        // requires DXGI 1.3 which was introduced in Windows 8.1
        WI_SetFlagIf(_swapChainDesc.Flags, DXGI_SWAP_CHAIN_FLAG_FRAME_LATENCY_WAITABLE_OBJECT, IsWindows8Point1OrGreater());

        _swapChainDesc.Format = DXGI_FORMAT_B8G8R8A8_UNORM;
        _swapChainDesc.BufferUsage = DXGI_USAGE_RENDER_TARGET_OUTPUT;
        _swapChainDesc.SwapEffect = DXGI_SWAP_EFFECT_FLIP_SEQUENTIAL;
        _swapChainDesc.BufferCount = 2;
        _swapChainDesc.SampleDesc.Count = 1;
        _swapChainDesc.AlphaMode = DXGI_ALPHA_MODE_UNSPECIFIED;
        _swapChainDesc.Scaling = DXGI_SCALING_NONE;

        switch (_chainMode)
        {
        case SwapChainMode::ForHwnd:
        {
            // use the HWND's dimensions for the swap chain dimensions.
            RECT rect = { 0 };
            RETURN_IF_WIN32_BOOL_FALSE(GetClientRect(_hwndTarget, &rect));

            _swapChainDesc.Width = rect.right - rect.left;
            _swapChainDesc.Height = rect.bottom - rect.top;

            // We can't do alpha for HWNDs. Set to ignore. It will fail otherwise.
            _swapChainDesc.AlphaMode = DXGI_ALPHA_MODE_IGNORE;
            const auto createSwapChainResult = _dxgiFactory2->CreateSwapChainForHwnd(_d3dDevice.Get(),
                                                                                     _hwndTarget,
                                                                                     &_swapChainDesc,
                                                                                     nullptr,
                                                                                     nullptr,
                                                                                     &_dxgiSwapChain);
            if (FAILED(createSwapChainResult))
            {
                // Retry with stretch scaling in case DXGI_SCALING_NONE isn't supported.
                _swapChainDesc.Scaling = DXGI_SCALING_STRETCH;
                RETURN_IF_FAILED(_dxgiFactory2->CreateSwapChainForHwnd(_d3dDevice.Get(),
                                                                       _hwndTarget,
                                                                       &_swapChainDesc,
                                                                       nullptr,
                                                                       nullptr,
                                                                       &_dxgiSwapChain));
            }

            break;
        }
        case SwapChainMode::ForComposition:
        {
            // Use the given target size for compositions.
            _swapChainDesc.Width = _displaySizePixels.width<UINT>();
            _swapChainDesc.Height = _displaySizePixels.height<UINT>();

            // We're doing advanced composition pretty much for the purpose of pretty alpha, so turn it on.
            _swapChainDesc.AlphaMode = DXGI_ALPHA_MODE_PREMULTIPLIED;
            // It's 100% required to use scaling mode stretch for composition. There is no other choice.
            _swapChainDesc.Scaling = DXGI_SCALING_STRETCH;

            RETURN_IF_FAILED(_dxgiFactory2->CreateSwapChainForComposition(_d3dDevice.Get(),
                                                                          &_swapChainDesc,
                                                                          nullptr,
                                                                          &_dxgiSwapChain));
            break;
        }
        default:
            THROW_HR(E_NOTIMPL);
        }

        // When the frame-latency-waitable flag was set above (8.1+), grab the
        // waitable handle so we can pace frames against the compositor.
        if (IsWindows8Point1OrGreater())
        {
            ::Microsoft::WRL::ComPtr<IDXGISwapChain2> swapChain2;
            const HRESULT asResult = _dxgiSwapChain.As(&swapChain2);
            if (SUCCEEDED(asResult))
            {
                _swapChainFrameLatencyWaitableObject = wil::unique_handle{ swapChain2->GetFrameLatencyWaitableObject() };
            }
            else
            {
                LOG_HR_MSG(asResult, "Failed to obtain IDXGISwapChain2 from swap chain");
            }
        }

        if (_retroTerminalEffects)
        {
            const HRESULT hr = _SetupTerminalEffects();
            if (FAILED(hr))
            {
                // Effects are best-effort: fall back to normal rendering rather than failing setup.
                _retroTerminalEffects = false;
                LOG_HR_MSG(hr, "Failed to setup terminal effects. Disabling.");
            }
        }

        // With a new swap chain, mark the entire thing as invalid.
        RETURN_IF_FAILED(InvalidateAll());

        // This is our first frame on this new target.
        _firstFrame = true;

        RETURN_IF_FAILED(_PrepareRenderTarget());
    }

    _haveDeviceResources = true;
    if (_isPainting)
    {
        // TODO: MSFT: 21169176 - remove this or restore the "try a few times to render" code... I think
        _d2dDeviceContext->BeginDraw();
    }

    freeOnFail.release(); // don't need to release if we made it to the bottom and everything was good.

    // Notify that swap chain changed.

    if (_pfn)
    {
        try
        {
            _pfn();
        }
        CATCH_LOG(); // A failure in the notification function isn't a failure to prepare, so just log it and go on.
    }

    return S_OK;
}
CATCH_RETURN();
|
2019-05-03 00:29:04 +02:00
|
|
|
|
2020-06-20 00:14:01 +02:00
|
|
|
// Routine Description:
// - Translates a DXGI alpha mode into the equivalent Direct2D alpha mode so the
//   D2D bitmap bound to the swap chain surface uses a matching pixel format.
// Arguments:
// - mode - The DXGI alpha mode from the swap chain description
// Return Value:
// - The corresponding D2D1 alpha mode; anything unrecognized (including
//   DXGI_ALPHA_MODE_UNSPECIFIED) maps to D2D1_ALPHA_MODE_UNKNOWN.
static constexpr D2D1_ALPHA_MODE _dxgiAlphaToD2d1Alpha(DXGI_ALPHA_MODE mode) noexcept
{
    if (mode == DXGI_ALPHA_MODE_PREMULTIPLIED)
    {
        return D2D1_ALPHA_MODE_PREMULTIPLIED;
    }
    if (mode == DXGI_ALPHA_MODE_STRAIGHT)
    {
        return D2D1_ALPHA_MODE_STRAIGHT;
    }
    if (mode == DXGI_ALPHA_MODE_IGNORE)
    {
        return D2D1_ALPHA_MODE_IGNORE;
    }
    if (mode == DXGI_ALPHA_MODE_FORCE_DWORD)
    {
        return D2D1_ALPHA_MODE_FORCE_DWORD;
    }
    // DXGI_ALPHA_MODE_UNSPECIFIED and any future values fall through here.
    return D2D1_ALPHA_MODE_UNKNOWN;
}
|
|
|
|
|
2019-06-11 22:27:09 +02:00
|
|
|
// Routine Description:
// - Binds the current swap chain's back buffer to the D2D device context as a
//   bitmap render target and (re)creates the brushes, stroke style, and
//   composition scaling transform used while drawing.
// Arguments:
// - <none>
// Return Value:
// - S_OK or a DirectX/D2D/DXGI failure from any of the creation steps.
[[nodiscard]] HRESULT DxEngine::_PrepareRenderTarget() noexcept
{
    try
    {
        // Pull surface out of swap chain.
        RETURN_IF_FAILED(_dxgiSwapChain->GetBuffer(0, IID_PPV_ARGS(&_dxgiSurface)));

        // Make a bitmap and bind it to the swap chain surface
        const auto bitmapProperties = D2D1::BitmapProperties1(
            D2D1_BITMAP_OPTIONS_TARGET | D2D1_BITMAP_OPTIONS_CANNOT_DRAW,
            D2D1::PixelFormat(_swapChainDesc.Format, _dxgiAlphaToD2d1Alpha(_swapChainDesc.AlphaMode)));

        RETURN_IF_FAILED(_d2dDeviceContext->CreateBitmapFromDxgiSurface(_dxgiSurface.Get(), bitmapProperties, &_d2dBitmap));

        // Assign that bitmap as the target of the D2D device context. Draw commands hit the context
        // and are backed by the bitmap which is bound to the swap chain which goes on to be presented.
        // (The foot bone connected to the leg bone,
        //  The leg bone connected to the knee bone,
        //  The knee bone connected to the thigh bone
        //  ... and so on)

        _d2dDeviceContext->SetTarget(_d2dBitmap.Get());

        // We need the AntialiasMode for non-text object to be Aliased to ensure
        // that background boxes line up with each other and don't leave behind
        // stray colors.
        // See GH#3626 for more details.
        _d2dDeviceContext->SetAntialiasMode(D2D1_ANTIALIAS_MODE_ALIASED);
        _d2dDeviceContext->SetTextAntialiasMode(_antialiasingMode);

        // Default brush colors; UpdateDrawingBrushes replaces these per run.
        RETURN_IF_FAILED(_d2dDeviceContext->CreateSolidColorBrush(D2D1::ColorF(D2D1::ColorF::DarkRed),
                                                                  &_d2dBrushBackground));

        RETURN_IF_FAILED(_d2dDeviceContext->CreateSolidColorBrush(D2D1::ColorF(D2D1::ColorF::White),
                                                                  &_d2dBrushForeground));

        const D2D1_STROKE_STYLE_PROPERTIES strokeStyleProperties{
            D2D1_CAP_STYLE_SQUARE, // startCap
            D2D1_CAP_STYLE_SQUARE, // endCap
            D2D1_CAP_STYLE_SQUARE, // dashCap
            D2D1_LINE_JOIN_MITER, // lineJoin
            0.f, // miterLimit
            D2D1_DASH_STYLE_SOLID, // dashStyle
            0.f, // dashOffset
        };
        RETURN_IF_FAILED(_d2dFactory->CreateStrokeStyle(&strokeStyleProperties, nullptr, 0, &_strokeStyle));

        // If in composition mode, apply scaling factor matrix
        if (_chainMode == SwapChainMode::ForComposition)
        {
            DXGI_MATRIX_3X2_F inverseScale = { 0 };
            inverseScale._11 = 1.0f / _scale;
            inverseScale._22 = inverseScale._11;

            ::Microsoft::WRL::ComPtr<IDXGISwapChain2> sc2;
            RETURN_IF_FAILED(_dxgiSwapChain.As(&sc2));
            RETURN_IF_FAILED(sc2->SetMatrixTransform(&inverseScale));
        }

        // Remember the scale this target was prepared at so later frames can
        // detect DPI changes.
        _prevScale = _scale;
        return S_OK;
    }
    CATCH_RETURN();
}
|
|
|
|
|
|
|
|
// Routine Description:
// - Releases device-specific resources (typically held on the GPU)
// - Resources are released in roughly reverse creation order: D2D objects
//   first, then the swap chain, then the D3D device/context, then the factory.
// Arguments:
// - <none>
// Return Value:
// - <none>
void DxEngine::_ReleaseDeviceResources() noexcept
{
    try
    {
        _haveDeviceResources = false;

        _pixelShaderSettingsBuffer.Reset();

        _d2dBrushForeground.Reset();
        _d2dBrushBackground.Reset();

        _d2dBitmap.Reset();

        // If a frame is mid-paint, close out the draw before tearing the context down.
        if (nullptr != _d2dDeviceContext.Get() && _isPainting)
        {
            _d2dDeviceContext->EndDraw();
        }

        _d2dDeviceContext.Reset();

        _dxgiSurface.Reset();
        _dxgiSwapChain.Reset();
        _swapChainFrameLatencyWaitableObject.reset();

        _d2dDevice.Reset();
        _dxgiDevice.Reset();

        if (nullptr != _d3dDeviceContext.Get())
        {
            // To ensure the swap chain goes away we must unbind any views from the
            // D3D pipeline
            _d3dDeviceContext->OMSetRenderTargets(0, nullptr, nullptr);
        }
        _d3dDeviceContext.Reset();

        _d3dDevice.Reset();

        _dxgiFactory2.Reset();
    }
    CATCH_LOG();
}
|
|
|
|
|
2020-06-22 18:13:09 +02:00
|
|
|
// Routine Description:
|
|
|
|
// - Calculates whether or not we should force grayscale AA based on the
|
|
|
|
// current renderer state.
|
|
|
|
// Arguments:
|
|
|
|
// - <none> - Uses internal state of _antialiasingMode, _defaultTextBackgroundOpacity,
|
|
|
|
// _backgroundColor, and _defaultBackgroundColor.
|
|
|
|
// Return Value:
|
|
|
|
// - True if we must render this text in grayscale AA as cleartype simply won't work. False otherwise.
|
|
|
|
[[nodiscard]] bool DxEngine::_ShouldForceGrayscaleAA() noexcept
|
|
|
|
{
|
|
|
|
// GH#5098: If we're rendering with cleartype text, we need to always
|
|
|
|
// render onto an opaque background. If our background's opacity is
|
|
|
|
// 1.0f, that's great, we can use that. Otherwise, we need to force the
|
|
|
|
// text renderer to render this text in grayscale. In
|
|
|
|
// UpdateDrawingBrushes, we'll set the backgroundColor's a channel to
|
|
|
|
// 1.0 if we're in cleartype mode and the background's opacity is 1.0.
|
|
|
|
// Otherwise, at this point, the _backgroundColor's alpha is <1.0.
|
|
|
|
//
|
|
|
|
// Currently, only text with the default background color uses an alpha
|
|
|
|
// of 0, every other background uses 1.0
|
|
|
|
//
|
|
|
|
// DANGER: Layers slow us down. Only do this in the specific case where
|
|
|
|
// someone has chosen the slower ClearType antialiasing (versus the faster
|
|
|
|
// grayscale antialiasing)
|
|
|
|
const bool usingCleartype = _antialiasingMode == D2D1_TEXT_ANTIALIAS_MODE_CLEARTYPE;
|
|
|
|
const bool usingTransparency = _defaultTextBackgroundOpacity != 1.0f;
|
|
|
|
// Another way of naming "bgIsDefault" is "bgHasTransparency"
|
|
|
|
const auto bgIsDefault = (_backgroundColor.a == _defaultBackgroundColor.a) &&
|
|
|
|
(_backgroundColor.r == _defaultBackgroundColor.r) &&
|
|
|
|
(_backgroundColor.g == _defaultBackgroundColor.g) &&
|
|
|
|
(_backgroundColor.b == _defaultBackgroundColor.b);
|
|
|
|
const bool forceGrayscaleAA = usingCleartype &&
|
|
|
|
usingTransparency &&
|
|
|
|
bgIsDefault;
|
|
|
|
|
|
|
|
return forceGrayscaleAA;
|
|
|
|
}
|
|
|
|
|
2019-05-03 00:29:04 +02:00
|
|
|
// Routine Description:
// - Helper to create a DirectWrite text layout object
//   out of a string.
// Arguments:
// - string - The text to attempt to layout
// - stringLength - Length of string above in characters
// - ppTextLayout - Location to receive new layout object
// Return Value:
// - S_OK if layout created successfully, otherwise a DirectWrite error
[[nodiscard]] HRESULT DxEngine::_CreateTextLayout(
    _In_reads_(stringLength) PCWCHAR string,
    _In_ size_t stringLength,
    _Out_ IDWriteTextLayout** ppTextLayout) noexcept
try
{
    // Constrain the layout to one glyph-cell row when the cell height is
    // known; fall back to the full display height before a font is selected.
    return _dwriteFactory->CreateTextLayout(string,
                                            gsl::narrow<UINT32>(stringLength),
                                            _dwriteTextFormat.Get(),
                                            _displaySizePixels.width<float>(),
                                            _glyphCell.height() != 0 ? _glyphCell.height<float>() : _displaySizePixels.height<float>(),
                                            ppTextLayout);
}
CATCH_RETURN()
|
2019-05-03 00:29:04 +02:00
|
|
|
|
|
|
|
// Routine Description:
// - Sets the target window handle for our display pipeline
// - We will take over the surface of this window for drawing
// - Also switches the engine into HWND swap chain mode (vs. composition mode).
// Arguments:
// - hwnd - Window handle
// Return Value:
// - S_OK
[[nodiscard]] HRESULT DxEngine::SetHwnd(const HWND hwnd) noexcept
{
    _hwndTarget = hwnd;
    _chainMode = SwapChainMode::ForHwnd;
    return S_OK;
}
|
|
|
|
|
2019-06-11 22:27:09 +02:00
|
|
|
// Routine Description:
// - Records the intended size of the drawing surface in pixels and resizes
//   the per-cell invalidation map to match, marking everything dirty.
// Arguments:
// - Pixels - Desired size of the render area in pixels
// Return Value:
// - S_OK, or a caught exception converted to an HRESULT (e.g. allocation failure)
[[nodiscard]] HRESULT DxEngine::SetWindowSize(const SIZE Pixels) noexcept
try
{
    _sizeTarget = Pixels;

    // New dimensions in cells; initialize every cell as invalid so the next
    // frame repaints the whole surface.
    _invalidMap.resize(_sizeTarget / _glyphCell, true);
    return S_OK;
}
CATCH_RETURN();
|
2019-05-03 00:29:04 +02:00
|
|
|
|
|
|
|
// Routine Description:
// - Stores the callback invoked when the swap chain is recreated (see the
//   notification at the end of _CreateDeviceResources).
// Arguments:
// - pfn - Callable to invoke on swap chain changes
// Return Value:
// - <none>
void DxEngine::SetCallback(std::function<void()> pfn)
{
    _pfn = pfn;
}
|
|
|
|
|
2020-07-02 01:17:43 +02:00
|
|
|
bool DxEngine::GetRetroTerminalEffects() const noexcept
|
|
|
|
{
|
|
|
|
return _retroTerminalEffects;
|
|
|
|
}
|
|
|
|
|
2019-12-12 14:44:01 +01:00
|
|
|
void DxEngine::SetRetroTerminalEffects(bool enable) noexcept
|
2020-06-19 23:09:37 +02:00
|
|
|
try
|
2019-12-12 14:44:01 +01:00
|
|
|
{
|
2020-06-19 23:09:37 +02:00
|
|
|
if (_retroTerminalEffects != enable)
|
|
|
|
{
|
|
|
|
_retroTerminalEffects = enable;
|
|
|
|
_recreateDeviceRequested = true;
|
|
|
|
LOG_IF_FAILED(InvalidateAll());
|
|
|
|
}
|
2019-12-12 14:44:01 +01:00
|
|
|
}
|
2020-06-19 23:09:37 +02:00
|
|
|
CATCH_LOG()
|
2019-12-12 14:44:01 +01:00
|
|
|
|
Add renderer settings to mitigate blurry text for some graphics devices
## Summary of the Pull Request
Adds user settings to adjust rendering behavior to mitigate blurry text on some devices.
## References
- #778 introduced this, almost certainly.
## PR Checklist
* [x] Closes #5759, mostly
* [x] I work here.
* [ ] We need community verification that this will help.
* [x] Updated schema and schema doc.
* [x] Am core contributor. Discussed in Monday sync meeting and w/ @DHowett-MSFT.
## Detailed Description of the Pull Request / Additional comments
When we switched from full-screen repaints to incremental rendering, it seems like we exposed a situation where some display drivers and hardware combinations do not handle scroll and/or dirty regions (from `IDXGISwapChain::Present1`) without blurring the data from the previous frame. As we're really close to ship, I'm offering two options to let people in this situation escape it on their own. We hope in the future to figure out what's actually going on here and mitigate it further in software, but until then, these escape hatches are available.
1. `experimental.rendering.forceFullRepaint` - This one restores the pre-778 behavior to the Terminal. On every single frame paint, we'll invalidate the entire screen and repaint it.
2. `experimental.rendering.software` - This one uses the software WARP renderer instead of using the hardware and display driver directly. The theory is that this will sidestep any driver bugs or hardware variations.
One, the other, or both of these may be field-applied by users who are experiencing this behavior.
Reverting #778 completely would also resolve this, but it would give back our largest performance win in the whole Terminal project. We don't believe that's acceptable when seemingly a majority of the users are experiencing the performance benefit with no detriment to graphical display.
## Validation Steps Performed
- [x] Flipped them on and verified with the debugger that they are being applied to the rendering pipeline
- [ ] Gave a private copy to community members in #5759 and had them try whether one, the other, or both resolved their issue.
2020-05-11 23:54:03 +02:00
|
|
|
void DxEngine::SetForceFullRepaintRendering(bool enable) noexcept
|
2020-06-19 23:09:37 +02:00
|
|
|
try
|
Add renderer settings to mitigate blurry text for some graphics devices
## Summary of the Pull Request
Adds user settings to adjust rendering behavior to mitigate blurry text on some devices.
## References
- #778 introduced this, almost certainly.
## PR Checklist
* [x] Closes #5759, mostly
* [x] I work here.
* [ ] We need community verification that this will help.
* [x] Updated schema and schema doc.
* [x] Am core contributor. Discussed in Monday sync meeting and w/ @DHowett-MSFT.
## Detailed Description of the Pull Request / Additional comments
When we switched from full-screen repaints to incremental rendering, it seems like we exposed a situation where some display drivers and hardware combinations do not handle scroll and/or dirty regions (from `IDXGISwapChain::Present1`) without blurring the data from the previous frame. As we're really close to ship, I'm offering two options to let people in this situation escape it on their own. We hope in the future to figure out what's actually going on here and mitigate it further in software, but until then, these escape hatches are available.
1. `experimental.rendering.forceFullRepaint` - This one restores the pre-778 behavior to the Terminal. On every single frame paint, we'll invalidate the entire screen and repaint it.
2. `experimental.rendering.software` - This one uses the software WARP renderer instead of using the hardware and display driver directly. The theory is that this will sidestep any driver bugs or hardware variations.
One, the other, or both of these may be field-applied by users who are experiencing this behavior.
Reverting #778 completely would also resolve this, but it would give back our largest performance win in the whole Terminal project. We don't believe that's acceptable when seemingly a majority of the users are experiencing the performance benefit with no detriment to graphical display.
## Validation Steps Performed
- [x] Flipped them on and verified with the debugger that they are being applied to the rendering pipeline
- [ ] Gave a private copy to community members in #5759 and had them try whether one, the other, or both resolved their issue.
2020-05-11 23:54:03 +02:00
|
|
|
{
|
2020-06-19 23:09:37 +02:00
|
|
|
if (_forceFullRepaintRendering != enable)
|
|
|
|
{
|
|
|
|
_forceFullRepaintRendering = enable;
|
|
|
|
LOG_IF_FAILED(InvalidateAll());
|
|
|
|
}
|
Add renderer settings to mitigate blurry text for some graphics devices
## Summary of the Pull Request
Adds user settings to adjust rendering behavior to mitigate blurry text on some devices.
## References
- #778 introduced this, almost certainly.
## PR Checklist
* [x] Closes #5759, mostly
* [x] I work here.
* [ ] We need community verification that this will help.
* [x] Updated schema and schema doc.
* [x] Am core contributor. Discussed in Monday sync meeting and w/ @DHowett-MSFT.
## Detailed Description of the Pull Request / Additional comments
When we switched from full-screen repaints to incremental rendering, it seems like we exposed a situation where some display drivers and hardware combinations do not handle scroll and/or dirty regions (from `IDXGISwapChain::Present1`) without blurring the data from the previous frame. As we're really close to ship, I'm offering two options to let people in this situation escape it on their own. We hope in the future to figure out what's actually going on here and mitigate it further in software, but until then, these escape hatches are available.
1. `experimental.rendering.forceFullRepaint` - This one restores the pre-778 behavior to the Terminal. On every single frame paint, we'll invalidate the entire screen and repaint it.
2. `experimental.rendering.software` - This one uses the software WARP renderer instead of using the hardware and display driver directly. The theory is that this will sidestep any driver bugs or hardware variations.
One, the other, or both of these may be field-applied by users who are experiencing this behavior.
Reverting #778 completely would also resolve this, but it would give back our largest performance win in the whole Terminal project. We don't believe that's acceptable when seemingly a majority of the users are experiencing the performance benefit with no detriment to graphical display.
## Validation Steps Performed
- [x] Flipped them on and verified with the debugger that they are being applied to the rendering pipeline
- [ ] Gave a private copy to community members in #5759 and had them try whether one, the other, or both resolved their issue.
2020-05-11 23:54:03 +02:00
|
|
|
}
|
2020-06-19 23:09:37 +02:00
|
|
|
CATCH_LOG()
|
Add renderer settings to mitigate blurry text for some graphics devices
## Summary of the Pull Request
Adds user settings to adjust rendering behavior to mitigate blurry text on some devices.
## References
- #778 introduced this, almost certainly.
## PR Checklist
* [x] Closes #5759, mostly
* [x] I work here.
* [ ] We need community verification that this will help.
* [x] Updated schema and schema doc.
* [x] Am core contributor. Discussed in Monday sync meeting and w/ @DHowett-MSFT.
## Detailed Description of the Pull Request / Additional comments
When we switched from full-screen repaints to incremental rendering, it seems like we exposed a situation where some display drivers and hardware combinations do not handle scroll and/or dirty regions (from `IDXGISwapChain::Present1`) without blurring the data from the previous frame. As we're really close to ship, I'm offering two options to let people in this situation escape it on their own. We hope in the future to figure out what's actually going on here and mitigate it further in software, but until then, these escape hatches are available.
1. `experimental.rendering.forceFullRepaint` - This one restores the pre-778 behavior to the Terminal. On every single frame paint, we'll invalidate the entire screen and repaint it.
2. `experimental.rendering.software` - This one uses the software WARP renderer instead of using the hardware and display driver directly. The theory is that this will sidestep any driver bugs or hardware variations.
One, the other, or both of these may be field-applied by users who are experiencing this behavior.
Reverting #778 completely would also resolve this, but it would give back our largest performance win in the whole Terminal project. We don't believe that's acceptable when seemingly a majority of the users are experiencing the performance benefit with no detriment to graphical display.
## Validation Steps Performed
- [x] Flipped them on and verified with the debugger that they are being applied to the rendering pipeline
- [ ] Gave a private copy to community members in #5759 and had them try whether one, the other, or both resolved their issue.
2020-05-11 23:54:03 +02:00
|
|
|
|
|
|
|
void DxEngine::SetSoftwareRendering(bool enable) noexcept
|
2020-06-19 23:09:37 +02:00
|
|
|
try
|
Add renderer settings to mitigate blurry text for some graphics devices
## Summary of the Pull Request
Adds user settings to adjust rendering behavior to mitigate blurry text on some devices.
## References
- #778 introduced this, almost certainly.
## PR Checklist
* [x] Closes #5759, mostly
* [x] I work here.
* [ ] We need community verification that this will help.
* [x] Updated schema and schema doc.
* [x] Am core contributor. Discussed in Monday sync meeting and w/ @DHowett-MSFT.
## Detailed Description of the Pull Request / Additional comments
When we switched from full-screen repaints to incremental rendering, it seems like we exposed a situation where some display drivers and hardware combinations do not handle scroll and/or dirty regions (from `IDXGISwapChain::Present1`) without blurring the data from the previous frame. As we're really close to ship, I'm offering two options to let people in this situation escape it on their own. We hope in the future to figure out what's actually going on here and mitigate it further in software, but until then, these escape hatches are available.
1. `experimental.rendering.forceFullRepaint` - This one restores the pre-778 behavior to the Terminal. On every single frame paint, we'll invalidate the entire screen and repaint it.
2. `experimental.rendering.software` - This one uses the software WARP renderer instead of using the hardware and display driver directly. The theory is that this will sidestep any driver bugs or hardware variations.
One, the other, or both of these may be field-applied by users who are experiencing this behavior.
Reverting #778 completely would also resolve this, but it would give back our largest performance win in the whole Terminal project. We don't believe that's acceptable when seemingly a majority of the users are experiencing the performance benefit with no detriment to graphical display.
## Validation Steps Performed
- [x] Flipped them on and verified with the debugger that they are being applied to the rendering pipeline
- [ ] Gave a private copy to community members in #5759 and had them try whether one, the other, or both resolved their issue.
2020-05-11 23:54:03 +02:00
|
|
|
{
|
2020-06-19 23:09:37 +02:00
|
|
|
if (_softwareRendering != enable)
|
|
|
|
{
|
|
|
|
_softwareRendering = enable;
|
|
|
|
_recreateDeviceRequested = true;
|
|
|
|
LOG_IF_FAILED(InvalidateAll());
|
|
|
|
}
|
Add renderer settings to mitigate blurry text for some graphics devices
## Summary of the Pull Request
Adds user settings to adjust rendering behavior to mitigate blurry text on some devices.
## References
- #778 introduced this, almost certainly.
## PR Checklist
* [x] Closes #5759, mostly
* [x] I work here.
* [ ] We need community verification that this will help.
* [x] Updated schema and schema doc.
* [x] Am core contributor. Discussed in Monday sync meeting and w/ @DHowett-MSFT.
## Detailed Description of the Pull Request / Additional comments
When we switched from full-screen repaints to incremental rendering, it seems like we exposed a situation where some display drivers and hardware combinations do not handle scroll and/or dirty regions (from `IDXGISwapChain::Present1`) without blurring the data from the previous frame. As we're really close to ship, I'm offering two options to let people in this situation escape it on their own. We hope in the future to figure out what's actually going on here and mitigate it further in software, but until then, these escape hatches are available.
1. `experimental.rendering.forceFullRepaint` - This one restores the pre-778 behavior to the Terminal. On every single frame paint, we'll invalidate the entire screen and repaint it.
2. `experimental.rendering.software` - This one uses the software WARP renderer instead of using the hardware and display driver directly. The theory is that this will sidestep any driver bugs or hardware variations.
One, the other, or both of these may be field-applied by users who are experiencing this behavior.
Reverting #778 completely would also resolve this, but it would give back our largest performance win in the whole Terminal project. We don't believe that's acceptable when seemingly a majority of the users are experiencing the performance benefit with no detriment to graphical display.
## Validation Steps Performed
- [x] Flipped them on and verified with the debugger that they are being applied to the rendering pipeline
- [ ] Gave a private copy to community members in #5759 and had them try whether one, the other, or both resolved their issue.
2020-05-11 23:54:03 +02:00
|
|
|
}
|
2020-06-19 23:09:37 +02:00
|
|
|
CATCH_LOG()
|
Add renderer settings to mitigate blurry text for some graphics devices
## Summary of the Pull Request
Adds user settings to adjust rendering behavior to mitigate blurry text on some devices.
## References
- #778 introduced this, almost certainly.
## PR Checklist
* [x] Closes #5759, mostly
* [x] I work here.
* [ ] We need community verification that this will help.
* [x] Updated schema and schema doc.
* [x] Am core contributor. Discussed in Monday sync meeting and w/ @DHowett-MSFT.
## Detailed Description of the Pull Request / Additional comments
When we switched from full-screen repaints to incremental rendering, it seems like we exposed a situation where some display drivers and hardware combinations do not handle scroll and/or dirty regions (from `IDXGISwapChain::Present1`) without blurring the data from the previous frame. As we're really close to ship, I'm offering two options to let people in this situation escape it on their own. We hope in the future to figure out what's actually going on here and mitigate it further in software, but until then, these escape hatches are available.
1. `experimental.rendering.forceFullRepaint` - This one restores the pre-778 behavior to the Terminal. On every single frame paint, we'll invalidate the entire screen and repaint it.
2. `experimental.rendering.software` - This one uses the software WARP renderer instead of using the hardware and display driver directly. The theory is that this will sidestep any driver bugs or hardware variations.
One, the other, or both of these may be field-applied by users who are experiencing this behavior.
Reverting #778 completely would also resolve this, but it would give back our largest performance win in the whole Terminal project. We don't believe that's acceptable when seemingly a majority of the users are experiencing the performance benefit with no detriment to graphical display.
## Validation Steps Performed
- [x] Flipped them on and verified with the debugger that they are being applied to the rendering pipeline
- [ ] Gave a private copy to community members in #5759 and had them try whether one, the other, or both resolved their issue.
2020-05-11 23:54:03 +02:00
|
|
|
|
2019-08-30 00:23:07 +02:00
|
|
|
// Routine Description:
// - Retrieves the swap chain, creating the device resources on demand if no
//   swap chain exists yet. Throws on resource-creation failure.
// Arguments:
// - <none>
// Return Value:
// - The DXGI swap chain this engine presents into.
Microsoft::WRL::ComPtr<IDXGISwapChain1> DxEngine::GetSwapChain()
{
    // Lazily build the device stack the first time anyone asks for the chain.
    if (nullptr == _dxgiSwapChain.Get())
    {
        THROW_IF_FAILED(_CreateDeviceResources(true));
    }

    return _dxgiSwapChain;
}
|
|
|
|
|
2020-04-13 22:09:02 +02:00
|
|
|
// Routine Description:
// - Marks a character-cell rectangle dirty in the invalid map. When
//   full-row invalidation is enabled, the region is widened to span every
//   column of the affected rows before being recorded.
// Arguments:
// - rc - rectangle (in cells) to mark as needing a repaint
// Return Value:
// - <none>
void DxEngine::_InvalidateRectangle(const til::rectangle& rc)
{
    if (_invalidateFullRows)
    {
        // Widen to the full width of the map while keeping the row span.
        const til::point rowOrigin{ static_cast<ptrdiff_t>(0), rc.top() };
        const til::size rowExtent{ _invalidMap.size().width(), rc.height() };
        _invalidMap.set(til::rectangle{ rowOrigin, rowExtent });
    }
    else
    {
        _invalidMap.set(rc);
    }
}
|
|
|
|
|
2020-07-17 21:32:36 +02:00
|
|
|
bool DxEngine::_IsAllInvalid() const noexcept
|
|
|
|
{
|
|
|
|
return std::llabs(_invalidScroll.y()) >= _invalidMap.size().height();
|
|
|
|
}
|
|
|
|
|
2019-05-03 00:29:04 +02:00
|
|
|
// Routine Description:
|
|
|
|
// - Invalidates a rectangle described in characters
|
|
|
|
// Arguments:
|
|
|
|
// - psrRegion - Character rectangle
|
|
|
|
// Return Value:
|
|
|
|
// - S_OK
|
2019-06-11 22:27:09 +02:00
|
|
|
[[nodiscard]] HRESULT DxEngine::Invalidate(const SMALL_RECT* const psrRegion) noexcept
|
2020-04-13 22:09:02 +02:00
|
|
|
try
|
2019-05-03 00:29:04 +02:00
|
|
|
{
|
2019-09-03 17:46:24 +02:00
|
|
|
RETURN_HR_IF_NULL(E_INVALIDARG, psrRegion);
|
|
|
|
|
2020-07-17 21:32:36 +02:00
|
|
|
if (!_allInvalid)
|
|
|
|
{
|
|
|
|
_InvalidateRectangle(Viewport::FromExclusive(*psrRegion).ToInclusive());
|
|
|
|
}
|
2020-04-13 22:09:02 +02:00
|
|
|
|
2019-05-03 00:29:04 +02:00
|
|
|
return S_OK;
|
|
|
|
}
|
2020-04-13 22:09:02 +02:00
|
|
|
CATCH_RETURN()
|
2019-05-03 00:29:04 +02:00
|
|
|
|
|
|
|
// Routine Description:
|
|
|
|
// - Invalidates one specific character coordinate
|
|
|
|
// Arguments:
|
|
|
|
// - pcoordCursor - single point in the character cell grid
|
|
|
|
// Return Value:
|
|
|
|
// - S_OK
|
2019-06-11 22:27:09 +02:00
|
|
|
[[nodiscard]] HRESULT DxEngine::InvalidateCursor(const COORD* const pcoordCursor) noexcept
|
2020-04-13 22:09:02 +02:00
|
|
|
try
|
2019-05-03 00:29:04 +02:00
|
|
|
{
|
2019-09-03 17:46:24 +02:00
|
|
|
RETURN_HR_IF_NULL(E_INVALIDARG, pcoordCursor);
|
|
|
|
|
2020-07-17 21:32:36 +02:00
|
|
|
if (!_allInvalid)
|
|
|
|
{
|
|
|
|
_InvalidateRectangle(til::rectangle{ *pcoordCursor, til::size{ 1, 1 } });
|
|
|
|
}
|
2020-04-13 22:09:02 +02:00
|
|
|
|
|
|
|
return S_OK;
|
2019-05-03 00:29:04 +02:00
|
|
|
}
|
2020-04-13 22:09:02 +02:00
|
|
|
CATCH_RETURN()
|
2019-05-03 00:29:04 +02:00
|
|
|
|
|
|
|
// Routine Description:
|
|
|
|
// - Invalidates a rectangle describing a pixel area on the display
|
|
|
|
// Arguments:
|
|
|
|
// - prcDirtyClient - pixel rectangle
|
|
|
|
// Return Value:
|
|
|
|
// - S_OK
|
2019-06-11 22:27:09 +02:00
|
|
|
[[nodiscard]] HRESULT DxEngine::InvalidateSystem(const RECT* const prcDirtyClient) noexcept
|
2020-04-13 22:09:02 +02:00
|
|
|
try
|
2019-05-03 00:29:04 +02:00
|
|
|
{
|
2019-09-03 17:46:24 +02:00
|
|
|
RETURN_HR_IF_NULL(E_INVALIDARG, prcDirtyClient);
|
|
|
|
|
2020-07-17 21:32:36 +02:00
|
|
|
if (!_allInvalid)
|
|
|
|
{
|
|
|
|
// Dirty client is in pixels. Use divide specialization against glyph factor to make conversion
|
|
|
|
// to cells.
|
|
|
|
_InvalidateRectangle(til::rectangle{ *prcDirtyClient }.scale_down(_glyphCell));
|
|
|
|
}
|
2019-05-03 00:29:04 +02:00
|
|
|
|
|
|
|
return S_OK;
|
|
|
|
}
|
2020-04-13 22:09:02 +02:00
|
|
|
CATCH_RETURN();
|
2019-05-03 00:29:04 +02:00
|
|
|
|
|
|
|
// Routine Description:
|
|
|
|
// - Invalidates a series of character rectangles
|
|
|
|
// Arguments:
|
|
|
|
// - rectangles - One or more rectangles describing character positions on the grid
|
|
|
|
// Return Value:
|
|
|
|
// - S_OK
|
2019-06-11 22:27:09 +02:00
|
|
|
// Routine Description:
// - Invalidates a series of character rectangles by forwarding each one to
//   Invalidate().
// Arguments:
// - rectangles - zero or more rectangles describing character positions on the grid
// Return Value:
// - S_OK, or the first failing HRESULT from Invalidate().
[[nodiscard]] HRESULT DxEngine::InvalidateSelection(const std::vector<SMALL_RECT>& rectangles) noexcept
{
    // If everything is dirty already, per-rectangle work is pointless.
    if (_allInvalid)
    {
        return S_OK;
    }

    for (const auto& rect : rectangles)
    {
        RETURN_IF_FAILED(Invalidate(&rect));
    }

    return S_OK;
}
|
|
|
|
|
|
|
|
// Routine Description:
|
|
|
|
// - Scrolls the existing dirty region (if it exists) and
|
|
|
|
// invalidates the area that is uncovered in the window.
|
|
|
|
// Arguments:
|
|
|
|
// - pcoordDelta - The number of characters to move and uncover.
|
|
|
|
// - -Y is up, Y is down, -X is left, X is right.
|
|
|
|
// Return Value:
|
|
|
|
// - S_OK
|
2019-06-11 22:27:09 +02:00
|
|
|
[[nodiscard]] HRESULT DxEngine::InvalidateScroll(const COORD* const pcoordDelta) noexcept
|
2020-04-13 22:09:02 +02:00
|
|
|
try
|
2019-05-03 00:29:04 +02:00
|
|
|
{
|
2020-04-13 22:09:02 +02:00
|
|
|
RETURN_HR_IF(E_INVALIDARG, !pcoordDelta);
|
2019-05-03 00:29:04 +02:00
|
|
|
|
2020-04-13 22:09:02 +02:00
|
|
|
const til::point deltaCells{ *pcoordDelta };
|
2019-05-03 00:29:04 +02:00
|
|
|
|
2020-07-17 21:32:36 +02:00
|
|
|
if (!_allInvalid)
|
2020-04-13 22:09:02 +02:00
|
|
|
{
|
2020-07-17 21:32:36 +02:00
|
|
|
if (deltaCells != til::point{ 0, 0 })
|
|
|
|
{
|
|
|
|
// Shift the contents of the map and fill in revealed area.
|
|
|
|
_invalidMap.translate(deltaCells, true);
|
|
|
|
_invalidScroll += deltaCells;
|
|
|
|
_allInvalid = _IsAllInvalid();
|
|
|
|
}
|
2019-05-03 00:29:04 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
return S_OK;
|
|
|
|
}
|
2020-04-13 22:09:02 +02:00
|
|
|
CATCH_RETURN();
|
2019-05-03 00:29:04 +02:00
|
|
|
|
|
|
|
// Routine Description:
|
|
|
|
// - Invalidates the entire window area
|
|
|
|
// Arguments:
|
|
|
|
// - <none>
|
|
|
|
// Return Value:
|
|
|
|
// - S_OK
|
2019-06-11 22:27:09 +02:00
|
|
|
[[nodiscard]] HRESULT DxEngine::InvalidateAll() noexcept
|
2020-04-13 22:09:02 +02:00
|
|
|
try
|
2019-05-03 00:29:04 +02:00
|
|
|
{
|
2020-04-13 22:09:02 +02:00
|
|
|
_invalidMap.set_all();
|
2020-07-17 21:32:36 +02:00
|
|
|
_allInvalid = true;
|
Adjusts High DPI scaling to enable differential rendering (#5345)
## Summary of the Pull Request
- Adjusts scaling practices in `DxEngine` (and related scaling practices in `TerminalControl`) for pixel-perfect row baselines and spacing at High DPI such that differential row-by-row rendering can be applied at High DPI.
## References
- #5185
## PR Checklist
* [x] Closes #5320, closes #3515, closes #1064
* [x] I work here.
* [x] Manually tested.
* [x] No doc.
* [x] Am core contributor. Also discussed with some of them already via Teams.
## Detailed Description of the Pull Request / Additional comments
**WAS:**
- We were using implicit DPI scaling on the `ID2D1RenderTarget` and running all of our processing in DIPs (Device-Independent Pixels). That's all well and good for getting things bootstrapped quickly, but it leaves the actual scaling of the draw commands up to the discretion of the rendering target.
- When we don't get to explicitly choose exactly how many pixels tall/wide and our X/Y placement perfectly, the nature of floating point multiplication and division required to do the presentation can cause us to drift off slightly out of our control depending on what the final display resolution actually is.
- Differential drawing cannot work unless we can know the exact integer pixels that need to be copied/moved/preserved/replaced between frames to give to the `IDXGISwapChain1::Present1` method. If things spill into fractional pixels or the sizes of rows/columns vary as they are rounded up and down implicitly, then we cannot do the differential rendering.
**NOW:**
- When deciding on a font, the `DxEngine` will take the scale factor into account and adjust the proposed height of the requested font. Then the remainder of the existing code that adjusts the baseline and integer-ifies each character cell will run naturally from there. That code already works correctly to align the height at normal DPI and scale out the font heights and advances to take an exact integer of pixels.
- `TermControl` has to use the scale now, in some places, and stop scaling in other places. This has to do with how the target's nature used to be implicit and is now explicit. For instance, determining where the cursor click hits must be scaled now. And determining the pixel size of the display canvas must no longer be scaled.
- `DxEngine` will no longer attempt to scale the invalid regions per my attempts in #5185 because the cell size is scaled. So it should work the same as at 96 DPI.
- The block is removed from the `DxEngine` that was causing a full invalidate on every frame at High DPI.
- A TODO was removed from `TermControl` that was invalidating everything when the DPI changed because the underlying renderer will already do that.
## Validation Steps Performed
* [x] Check at 150% DPI. Print text, scroll text down and up, do selection.
* [x] Check at 100% DPI. Print text, scroll text down and up, do selection.
* [x] Span two different DPI monitors and drag between them.
* [x] Giant pile of tests in https://github.com/microsoft/terminal/pull/5345#issuecomment-614127648
Co-authored-by: Dustin Howett <duhowett@microsoft.com>
Co-authored-by: Mike Griese <migrie@microsoft.com>
2020-04-22 23:59:51 +02:00
|
|
|
|
|
|
|
// Since everything is invalidated here, mark this as a "first frame", so
|
|
|
|
// that we won't use incremental drawing on it. The caller of this intended
|
|
|
|
// for _everything_ to get redrawn, so setting _firstFrame will force us to
|
|
|
|
// redraw the entire frame. This will make sure that things like the gutters
|
|
|
|
// get cleared correctly.
|
|
|
|
//
|
|
|
|
// Invalidating everything is supposed to happen with resizes of the
|
|
|
|
// entire canvas, changes of the font, and other such adjustments.
|
|
|
|
_firstFrame = true;
|
2019-05-03 00:29:04 +02:00
|
|
|
return S_OK;
|
|
|
|
}
|
2020-04-13 22:09:02 +02:00
|
|
|
CATCH_RETURN();
|
2019-05-03 00:29:04 +02:00
|
|
|
|
|
|
|
// Routine Description:
|
|
|
|
// - This currently has no effect in this renderer.
|
|
|
|
// Arguments:
|
|
|
|
// - pForcePaint - Always filled with false
|
|
|
|
// Return Value:
|
|
|
|
// - S_FALSE because we don't use this.
|
2019-06-11 22:27:09 +02:00
|
|
|
// Routine Description:
// - Circling invalidation has no effect in this renderer; the out flag is
//   always cleared.
// Arguments:
// - pForcePaint - always filled with false
// Return Value:
// - S_FALSE because this renderer does not use circling; E_INVALIDARG for null.
[[nodiscard]] HRESULT DxEngine::InvalidateCircling(_Out_ bool* const pForcePaint) noexcept
{
    RETURN_HR_IF_NULL(E_INVALIDARG, pForcePaint);

    *pForcePaint = false;

    return S_FALSE;
}
|
|
|
|
|
|
|
|
// Routine Description:
|
|
|
|
// - Gets the area in pixels of the surface we are targeting
|
|
|
|
// Arguments:
|
|
|
|
// - <none>
|
|
|
|
// Return Value:
|
|
|
|
// - X by Y area in pixels of the surface
|
2020-04-13 22:09:02 +02:00
|
|
|
[[nodiscard]] til::size DxEngine::_GetClientSize() const
|
2019-05-03 00:29:04 +02:00
|
|
|
{
|
|
|
|
switch (_chainMode)
|
|
|
|
{
|
|
|
|
case SwapChainMode::ForHwnd:
|
|
|
|
{
|
|
|
|
RECT clientRect = { 0 };
|
|
|
|
LOG_IF_WIN32_BOOL_FALSE(GetClientRect(_hwndTarget, &clientRect));
|
|
|
|
|
2020-04-13 22:09:02 +02:00
|
|
|
return til::rectangle{ clientRect }.size();
|
2019-05-03 00:29:04 +02:00
|
|
|
}
|
|
|
|
case SwapChainMode::ForComposition:
|
|
|
|
{
|
Adjusts High DPI scaling to enable differential rendering (#5345)
## Summary of the Pull Request
- Adjusts scaling practices in `DxEngine` (and related scaling practices in `TerminalControl`) for pixel-perfect row baselines and spacing at High DPI such that differential row-by-row rendering can be applied at High DPI.
## References
- #5185
## PR Checklist
* [x] Closes #5320, closes #3515, closes #1064
* [x] I work here.
* [x] Manually tested.
* [x] No doc.
* [x] Am core contributor. Also discussed with some of them already via Teams.
## Detailed Description of the Pull Request / Additional comments
**WAS:**
- We were using implicit DPI scaling on the `ID2D1RenderTarget` and running all of our processing in DIPs (Device-Independent Pixels). That's all well and good for getting things bootstrapped quickly, but it leaves the actual scaling of the draw commands up to the discretion of the rendering target.
- When we don't get to explicitly choose exactly how many pixels tall/wide and our X/Y placement perfectly, the nature of floating point multiplication and division required to do the presentation can cause us to drift off slightly out of our control depending on what the final display resolution actually is.
- Differential drawing cannot work unless we can know the exact integer pixels that need to be copied/moved/preserved/replaced between frames to give to the `IDXGISwapChain1::Present1` method. If things spill into fractional pixels or the sizes of rows/columns vary as they are rounded up and down implicitly, then we cannot do the differential rendering.
**NOW:**
- When deciding on a font, the `DxEngine` will take the scale factor into account and adjust the proposed height of the requested font. Then the remainder of the existing code that adjusts the baseline and integer-ifies each character cell will run naturally from there. That code already works correctly to align the height at normal DPI and scale out the font heights and advances to take an exact integer of pixels.
- `TermControl` has to use the scale now, in some places, and stop scaling in other places. This has to do with how the target's nature used to be implicit and is now explicit. For instance, determining where the cursor click hits must be scaled now. And determining the pixel size of the display canvas must no longer be scaled.
- `DxEngine` will no longer attempt to scale the invalid regions per my attempts in #5185 because the cell size is scaled. So it should work the same as at 96 DPI.
- The block is removed from the `DxEngine` that was causing a full invalidate on every frame at High DPI.
- A TODO was removed from `TermControl` that was invalidating everything when the DPI changed because the underlying renderer will already do that.
## Validation Steps Performed
* [x] Check at 150% DPI. Print text, scroll text down and up, do selection.
* [x] Check at 100% DPI. Print text, scroll text down and up, do selection.
* [x] Span two different DPI monitors and drag between them.
* [x] Giant pile of tests in https://github.com/microsoft/terminal/pull/5345#issuecomment-614127648
Co-authored-by: Dustin Howett <duhowett@microsoft.com>
Co-authored-by: Mike Griese <migrie@microsoft.com>
2020-04-22 23:59:51 +02:00
|
|
|
return _sizeTarget;
|
2019-05-03 00:29:04 +02:00
|
|
|
}
|
|
|
|
default:
|
2019-08-30 00:23:07 +02:00
|
|
|
FAIL_FAST_HR(E_NOTIMPL);
|
2019-05-03 00:29:04 +02:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// Routine Description:
|
|
|
|
// - Helper to multiply all parameters of a rectangle by the font size
|
|
|
|
// to convert from characters to pixels.
|
|
|
|
// Arguments:
|
|
|
|
// - cellsToPixels - rectangle to update
|
|
|
|
// - fontSize - scaling factors
|
|
|
|
// Return Value:
|
|
|
|
// - <none> - Updates reference
|
|
|
|
// Routine Description:
// - Helper to multiply all four edges of a rectangle by the font size,
//   converting character-cell coordinates into pixel coordinates.
// Arguments:
// - cellsToPixels - rectangle to update in place
// - fontSize - per-axis scaling factors (cx for horizontal, cy for vertical)
// Return Value:
// - <none> - Updates the rectangle passed by reference.
void _ScaleByFont(RECT& cellsToPixels, SIZE fontSize) noexcept
{
    // Vertical edges scale by the cell height...
    cellsToPixels.top *= fontSize.cy;
    cellsToPixels.bottom *= fontSize.cy;

    // ...and horizontal edges by the cell width.
    cellsToPixels.left *= fontSize.cx;
    cellsToPixels.right *= fontSize.cx;
}
|
|
|
|
|
|
|
|
// Routine Description:
|
|
|
|
// - This is unused by this renderer.
|
|
|
|
// Arguments:
|
|
|
|
// - pForcePaint - always filled with false.
|
|
|
|
// Return Value:
|
|
|
|
// - S_FALSE because this is unused.
|
2019-06-11 22:27:09 +02:00
|
|
|
// Routine Description:
// - Teardown preparation is unused by this renderer; the out flag is always
//   cleared.
// Arguments:
// - pForcePaint - always filled with false
// Return Value:
// - S_FALSE because this is unused; E_INVALIDARG for a null pointer.
[[nodiscard]] HRESULT DxEngine::PrepareForTeardown(_Out_ bool* const pForcePaint) noexcept
{
    RETURN_HR_IF_NULL(E_INVALIDARG, pForcePaint);

    *pForcePaint = false;

    return S_FALSE;
}
|
|
|
|
|
|
|
|
// Routine description:
|
|
|
|
// - Prepares the surfaces for painting and begins a drawing batch
|
|
|
|
// Arguments:
|
|
|
|
// - <none>
|
|
|
|
// Return Value:
|
|
|
|
// - Any DirectX error, a memory error, etc.
|
2019-06-11 22:27:09 +02:00
|
|
|
[[nodiscard]] HRESULT DxEngine::StartPaint() noexcept
|
2020-04-13 22:09:02 +02:00
|
|
|
try
|
2019-05-03 00:29:04 +02:00
|
|
|
{
|
|
|
|
RETURN_HR_IF(E_NOT_VALID_STATE, _isPainting); // invalid to start a paint while painting.
|
|
|
|
|
Add renderer settings to mitigate blurry text for some graphics devices
## Summary of the Pull Request
Adds user settings to adjust rendering behavior to mitigate blurry text on some devices.
## References
- #778 introduced this, almost certainly.
## PR Checklist
* [x] Closes #5759, mostly
* [x] I work here.
* [ ] We need community verification that this will help.
* [x] Updated schema and schema doc.
* [x] Am core contributor. Discussed in Monday sync meeting and w/ @DHowett-MSFT.
## Detailed Description of the Pull Request / Additional comments
When we switched from full-screen repaints to incremental rendering, it seems like we exposed a situation where some display drivers and hardware combinations do not handle scroll and/or dirty regions (from `IDXGISwapChain::Present1`) without blurring the data from the previous frame. As we're really close to ship, I'm offering two options to let people in this situation escape it on their own. We hope in the future to figure out what's actually going on here and mitigate it further in software, but until then, these escape hatches are available.
1. `experimental.rendering.forceFullRepaint` - This one restores the pre-778 behavior to the Terminal. On every single frame paint, we'll invalidate the entire screen and repaint it.
2. `experimental.rendering.software` - This one uses the software WARP renderer instead of using the hardware and display driver directly. The theory is that this will sidestep any driver bugs or hardware variations.
One, the other, or both of these may be field-applied by users who are experiencing this behavior.
Reverting #778 completely would also resolve this, but it would give back our largest performance win in the whole Terminal project. We don't believe that's acceptable when seemingly a majority of the users are experiencing the performance benefit with no detriment to graphical display.
## Validation Steps Performed
- [x] Flipped them on and verified with the debugger that they are being applied to the rendering pipeline
- [ ] Gave a private copy to community members in #5759 and had them try whether one, the other, or both resolved their issue.
2020-05-11 23:54:03 +02:00
|
|
|
// If someone explicitly requested differential rendering off, then we need to invalidate everything
|
|
|
|
// so the entire frame is repainted.
|
|
|
|
//
|
2020-04-13 22:09:02 +02:00
|
|
|
// If retro terminal effects are on, we must invalidate everything for them to draw correctly.
|
|
|
|
// Yes, this will further impact the performance of retro terminal effects.
|
|
|
|
// But we're talking about running the entire display pipeline through a shader for
|
|
|
|
// cosmetic effect, so performance isn't likely the top concern with this feature.
|
Add renderer settings to mitigate blurry text for some graphics devices
## Summary of the Pull Request
Adds user settings to adjust rendering behavior to mitigate blurry text on some devices.
## References
- #778 introduced this, almost certainly.
## PR Checklist
* [x] Closes #5759, mostly
* [x] I work here.
* [ ] We need community verification that this will help.
* [x] Updated schema and schema doc.
* [x] Am core contributor. Discussed in Monday sync meeting and w/ @DHowett-MSFT.
## Detailed Description of the Pull Request / Additional comments
When we switched from full-screen repaints to incremental rendering, it seems like we exposed a situation where some display drivers and hardware combinations do not handle scroll and/or dirty regions (from `IDXGISwapChain::Present1`) without blurring the data from the previous frame. As we're really close to ship, I'm offering two options to let people in this situation escape it on their own. We hope in the future to figure out what's actually going on here and mitigate it further in software, but until then, these escape hatches are available.
1. `experimental.rendering.forceFullRepaint` - This one restores the pre-778 behavior to the Terminal. On every single frame paint, we'll invalidate the entire screen and repaint it.
2. `experimental.rendering.software` - This one uses the software WARP renderer instead of using the hardware and display driver directly. The theory is that this will sidestep any driver bugs or hardware variations.
One, the other, or both of these may be field-applied by users who are experiencing this behavior.
Reverting #778 completely would also resolve this, but it would give back our largest performance win in the whole Terminal project. We don't believe that's acceptable when seemingly a majority of the users are experiencing the performance benefit with no detriment to graphical display.
## Validation Steps Performed
- [x] Flipped them on and verified with the debugger that they are being applied to the rendering pipeline
- [ ] Gave a private copy to community members in #5759 and had them try whether one, the other, or both resolved their issue.
2020-05-11 23:54:03 +02:00
|
|
|
if (_forceFullRepaintRendering || _retroTerminalEffects)
|
2020-04-13 22:09:02 +02:00
|
|
|
{
|
|
|
|
_invalidMap.set_all();
|
|
|
|
}
|
|
|
|
|
|
|
|
if (TraceLoggingProviderEnabled(g_hDxRenderProvider, WINEVENT_LEVEL_VERBOSE, 0))
|
|
|
|
{
|
|
|
|
const auto invalidatedStr = _invalidMap.to_string();
|
|
|
|
const auto invalidated = invalidatedStr.c_str();
|
|
|
|
|
2020-02-21 00:13:43 +01:00
|
|
|
#pragma warning(suppress : 26477 26485 26494 26482 26446 26447) // We don't control TraceLoggingWrite
|
2020-04-13 22:09:02 +02:00
|
|
|
TraceLoggingWrite(g_hDxRenderProvider,
|
|
|
|
"Invalid",
|
|
|
|
TraceLoggingWideString(invalidated),
|
|
|
|
TraceLoggingLevel(WINEVENT_LEVEL_VERBOSE));
|
|
|
|
}
|
2020-02-21 00:13:43 +01:00
|
|
|
|
2019-06-11 22:27:09 +02:00
|
|
|
if (_isEnabled)
|
|
|
|
{
|
2020-04-13 22:09:02 +02:00
|
|
|
const auto clientSize = _GetClientSize();
|
2020-06-19 23:09:37 +02:00
|
|
|
|
|
|
|
// If we don't have device resources or if someone has requested that we
|
|
|
|
// recreate the device... then make new resources. (Create will dump the old ones.)
|
|
|
|
if (!_haveDeviceResources || _recreateDeviceRequested)
|
2019-05-03 00:29:04 +02:00
|
|
|
{
|
2020-04-13 22:09:02 +02:00
|
|
|
RETURN_IF_FAILED(_CreateDeviceResources(true));
|
2020-06-19 23:09:37 +02:00
|
|
|
_recreateDeviceRequested = false;
|
2020-04-13 22:09:02 +02:00
|
|
|
}
|
Adjusts High DPI scaling to enable differential rendering (#5345)
## Summary of the Pull Request
- Adjusts scaling practices in `DxEngine` (and related scaling practices in `TerminalControl`) for pixel-perfect row baselines and spacing at High DPI such that differential row-by-row rendering can be applied at High DPI.
## References
- #5185
## PR Checklist
* [x] Closes #5320, closes #3515, closes #1064
* [x] I work here.
* [x] Manually tested.
* [x] No doc.
* [x] Am core contributor. Also discussed with some of them already via Teams.
## Detailed Description of the Pull Request / Additional comments
**WAS:**
- We were using implicit DPI scaling on the `ID2D1RenderTarget` and running all of our processing in DIPs (Device-Independent Pixels). That's all well and good for getting things bootstrapped quickly, but it leaves the actual scaling of the draw commands up to the discretion of the rendering target.
- When we don't get to explicitly choose exactly how many pixels tall/wide and our X/Y placement perfectly, the nature of floating point multiplication and division required to do the presentation can cause us to drift off slightly out of our control depending on what the final display resolution actually is.
- Differential drawing cannot work unless we can know the exact integer pixels that need to be copied/moved/preserved/replaced between frames to give to the `IDXGISwapChain1::Present1` method. If things spill into fractional pixels or the sizes of rows/columns vary as they are rounded up and down implicitly, then we cannot do the differential rendering.
**NOW:**
- When deciding on a font, the `DxEngine` will take the scale factor into account and adjust the proposed height of the requested font. Then the remainder of the existing code that adjusts the baseline and integer-ifies each character cell will run naturally from there. That code already works correctly to align the height at normal DPI and scale out the font heights and advances to take an exact integer of pixels.
- `TermControl` has to use the scale now, in some places, and stop scaling in other places. This has to do with how the target's nature used to be implicit and is now explicit. For instance, determining where the cursor click hits must be scaled now. And determining the pixel size of the display canvas must no longer be scaled.
- `DxEngine` will no longer attempt to scale the invalid regions per my attempts in #5185 because the cell size is scaled. So it should work the same as at 96 DPI.
- The block is removed from the `DxEngine` that was causing a full invalidate on every frame at High DPI.
- A TODO was removed from `TermControl` that was invalidating everything when the DPI changed because the underlying renderer will already do that.
## Validation Steps Performed
* [x] Check at 150% DPI. Print text, scroll text down and up, do selection.
* [x] Check at 100% DPI. Print text, scroll text down and up, do selection.
* [x] Span two different DPI monitors and drag between them.
* [x] Giant pile of tests in https://github.com/microsoft/terminal/pull/5345#issuecomment-614127648
Co-authored-by: Dustin Howett <duhowett@microsoft.com>
Co-authored-by: Mike Griese <migrie@microsoft.com>
2020-04-22 23:59:51 +02:00
|
|
|
else if (_displaySizePixels != clientSize || _prevScale != _scale)
|
2020-04-13 22:09:02 +02:00
|
|
|
{
|
|
|
|
// OK, we're going to play a dangerous game here for the sake of optimizing resize
|
|
|
|
// First, set up a complete clear of all device resources if something goes terribly wrong.
|
|
|
|
auto resetDeviceResourcesOnFailure = wil::scope_exit([&]() noexcept {
|
|
|
|
_ReleaseDeviceResources();
|
|
|
|
});
|
2019-07-02 21:51:28 +02:00
|
|
|
|
2020-04-13 22:09:02 +02:00
|
|
|
// Now let go of a few of the device resources that get in the way of resizing buffers in the swap chain
|
|
|
|
_dxgiSurface.Reset();
|
2020-06-20 00:14:01 +02:00
|
|
|
_d2dDeviceContext->SetTarget(nullptr);
|
|
|
|
_d2dBitmap.Reset();
|
2019-07-02 21:51:28 +02:00
|
|
|
|
2020-04-13 22:09:02 +02:00
|
|
|
// Change the buffer size and recreate the render target (and surface)
|
2020-06-20 00:14:01 +02:00
|
|
|
RETURN_IF_FAILED(_dxgiSwapChain->ResizeBuffers(2, clientSize.width<UINT>(), clientSize.height<UINT>(), _swapChainDesc.Format, _swapChainDesc.Flags));
|
2020-04-13 22:09:02 +02:00
|
|
|
RETURN_IF_FAILED(_PrepareRenderTarget());
|
2019-07-02 21:51:28 +02:00
|
|
|
|
2020-04-13 22:09:02 +02:00
|
|
|
// OK we made it past the parts that can cause errors. We can release our failure handler.
|
|
|
|
resetDeviceResourcesOnFailure.release();
|
2019-07-02 21:51:28 +02:00
|
|
|
|
2020-04-13 22:09:02 +02:00
|
|
|
// And persist the new size.
|
|
|
|
_displaySizePixels = clientSize;
|
2019-05-03 00:29:04 +02:00
|
|
|
|
2020-04-13 22:09:02 +02:00
|
|
|
// Mark this as the first frame on the new target. We can't use incremental drawing on the first frame.
|
|
|
|
_firstFrame = true;
|
2019-08-30 00:23:07 +02:00
|
|
|
}
|
2020-04-13 22:09:02 +02:00
|
|
|
|
2020-06-20 00:14:01 +02:00
|
|
|
_d2dDeviceContext->BeginDraw();
|
2020-04-13 22:09:02 +02:00
|
|
|
_isPainting = true;
|
2020-06-22 18:13:09 +02:00
|
|
|
|
|
|
|
{
|
|
|
|
// Get the baseline for this font as that's where we draw from
|
|
|
|
DWRITE_LINE_SPACING spacing;
|
|
|
|
RETURN_IF_FAILED(_dwriteTextFormat->GetLineSpacing(&spacing.method, &spacing.height, &spacing.baseline));
|
|
|
|
|
|
|
|
// Assemble the drawing context information
|
|
|
|
_drawingContext = std::make_unique<DrawingContext>(_d2dDeviceContext.Get(),
|
|
|
|
_d2dBrushForeground.Get(),
|
|
|
|
_d2dBrushBackground.Get(),
|
|
|
|
_ShouldForceGrayscaleAA(),
|
|
|
|
_dwriteFactory.Get(),
|
|
|
|
spacing,
|
|
|
|
_glyphCell,
|
|
|
|
_d2dDeviceContext->GetSize(),
|
|
|
|
std::nullopt,
|
|
|
|
D2D1_DRAW_TEXT_OPTIONS_ENABLE_COLOR_FONT);
|
|
|
|
}
|
2019-05-03 00:29:04 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
return S_OK;
|
|
|
|
}
|
2020-04-13 22:09:02 +02:00
|
|
|
CATCH_RETURN()
|
2019-05-03 00:29:04 +02:00
|
|
|
|
|
|
|
// Routine Description:
|
|
|
|
// - Ends batch drawing and captures any state necessary for presentation
|
|
|
|
// Arguments:
|
|
|
|
// - <none>
|
|
|
|
// Return Value:
|
|
|
|
// - Any DirectX error, a memory error, etc.
|
2019-06-11 22:27:09 +02:00
|
|
|
[[nodiscard]] HRESULT DxEngine::EndPaint() noexcept
try
{
    RETURN_HR_IF(E_INVALIDARG, !_isPainting); // invalid to end paint when we're not painting

    // Default to success; on a failed EndDraw we still fall through to reset
    // the invalid state before returning the failure to the caller.
    HRESULT hr = S_OK;

    if (_haveDeviceResources)
    {
        _isPainting = false;

        // If there's still a clip hanging around, remove it. We're all done.
        LOG_IF_FAILED(_customRenderer->EndClip(_drawingContext.get()));

        hr = _d2dDeviceContext->EndDraw();

        if (SUCCEEDED(hr))
        {
            // Only assemble Present1 partial-presentation parameters when a scroll
            // occurred this frame; otherwise the previous parameters stand.
            if (_invalidScroll != til::point{ 0, 0 })
            {
                // Copy `til::rectangles` into RECT map.
                _presentDirty.assign(_invalidMap.begin(), _invalidMap.end());

                // Scale all dirty rectangles into pixels
                std::transform(_presentDirty.begin(), _presentDirty.end(), _presentDirty.begin(), [&](til::rectangle rc) {
                    return rc.scale_up(_glyphCell);
                });

                // Invalid scroll is in characters, convert it to pixels.
                const auto scrollPixels = (_invalidScroll * _glyphCell);

                // The scroll rect is the entire field of cells, but in pixels.
                til::rectangle scrollArea{ _invalidMap.size() * _glyphCell };

                // Reduce the size of the rectangle by the scroll.
                scrollArea -= til::size{} - scrollPixels;

                // Assign the area to the present storage
                _presentScroll = scrollArea;

                // Pass the offset.
                _presentOffset = scrollPixels;

                // Now fill up the parameters structure from the member variables.
                // NOTE: the pointers handed to _presentParams reference member storage
                // (_presentDirty/_presentOffset/_presentScroll) so they stay valid
                // until Present() consumes them.
                _presentParams.DirtyRectsCount = gsl::narrow<UINT>(_presentDirty.size());
                _presentParams.pDirtyRects = _presentDirty.data();

                _presentParams.pScrollOffset = &_presentOffset;
                _presentParams.pScrollRect = &_presentScroll;

                // The scroll rect will be empty if we scrolled >= 1 full screen size.
                // Present1 doesn't like that. So clear it out. Everything will be dirty anyway.
                if (IsRectEmpty(&_presentScroll))
                {
                    _presentParams.pScrollRect = nullptr;
                    _presentParams.pScrollOffset = nullptr;
                }
            }

            _presentReady = true;
        }
        else
        {
            // EndDraw failed: the frame can't be presented and the device
            // resources are suspect, so drop them for recreation next frame.
            _presentReady = false;
            _ReleaseDeviceResources();
        }
    }

    // Regardless of success, this frame's invalidation accounting is consumed.
    _invalidMap.reset_all();
    _allInvalid = false;

    _invalidScroll = {};

    return hr;
}
CATCH_RETURN()
|
2019-05-03 00:29:04 +02:00
|
|
|
|
|
|
|
// Routine Description:
|
|
|
|
// - Copies the front surface of the swap chain (the one being displayed)
|
|
|
|
// to the back surface of the swap chain (the one we draw on next)
|
|
|
|
// so we can draw on top of what's already there.
|
|
|
|
// Arguments:
|
|
|
|
// - <none>
|
|
|
|
// Return Value:
|
|
|
|
// - Any DirectX error, a memory error, etc.
|
2019-06-11 22:27:09 +02:00
|
|
|
// Routine Description:
// - Copies the front surface of the swap chain (the one being displayed)
//   onto the back surface (the one we draw on next) so the next frame can
//   be rendered as a delta on top of what is already on screen.
// Arguments:
// - <none>
// Return Value:
// - S_OK, or any DirectX/memory error from retrieving or copying the buffers.
[[nodiscard]] HRESULT DxEngine::_CopyFrontToBack() noexcept
{
    try
    {
        Microsoft::WRL::ComPtr<ID3D11Resource> destination;
        Microsoft::WRL::ComPtr<ID3D11Resource> source;

        // Buffer 0 is the back buffer (render target); buffer 1 is the front
        // buffer currently being displayed.
        RETURN_IF_FAILED(_dxgiSwapChain->GetBuffer(0, IID_PPV_ARGS(&destination)));
        RETURN_IF_FAILED(_dxgiSwapChain->GetBuffer(1, IID_PPV_ARGS(&source)));

        _d3dDeviceContext->CopyResource(destination.Get(), source.Get());
    }
    CATCH_RETURN();

    return S_OK;
}
|
|
|
|
|
2020-06-11 00:35:14 +02:00
|
|
|
// Method Description:
|
|
|
|
// - Blocks until the engine is able to render without blocking.
|
|
|
|
// - See https://docs.microsoft.com/en-us/windows/uwp/gaming/reduce-latency-with-dxgi-1-3-swap-chains.
|
|
|
|
void DxEngine::WaitUntilCanRender() noexcept
|
|
|
|
{
|
|
|
|
if (!_swapChainFrameLatencyWaitableObject)
|
|
|
|
{
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
const auto ret = WaitForSingleObjectEx(
|
|
|
|
_swapChainFrameLatencyWaitableObject.get(),
|
|
|
|
1000, // 1 second timeout (shouldn't ever occur)
|
|
|
|
true);
|
|
|
|
if (ret != WAIT_OBJECT_0)
|
|
|
|
{
|
|
|
|
LOG_WIN32_MSG(ret, "Waiting for swap chain frame latency waitable object returned error or timeout.");
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2019-05-03 00:29:04 +02:00
|
|
|
// Routine Description:
|
|
|
|
// - Takes queued drawing information and presents it to the screen.
|
2019-05-21 08:15:44 +02:00
|
|
|
// - This is separated out so it can be done outside the lock as it's expensive.
|
2019-05-03 00:29:04 +02:00
|
|
|
// Arguments:
|
|
|
|
// - <none>
|
|
|
|
// Return Value:
|
2019-09-24 00:06:47 +02:00
|
|
|
// - S_OK on success, E_PENDING to indicate a retry or a relevant DirectX error
|
2019-06-11 22:27:09 +02:00
|
|
|
[[nodiscard]] HRESULT DxEngine::Present() noexcept
{
    // Nothing to do unless EndPaint successfully staged a frame.
    if (_presentReady)
    {
        if (_retroTerminalEffects)
        {
            const HRESULT hr2 = _PaintTerminalEffects();
            if (FAILED(hr2))
            {
                // Effects are cosmetic: disable them on failure rather than
                // failing the whole presentation.
                _retroTerminalEffects = false;
                LOG_HR_MSG(hr2, "Failed to paint terminal effects. Disabling.");
            }
        }

        try
        {
            HRESULT hr = S_OK;

            bool recreate = false;

            // On anything but the first frame, try partial presentation.
            // We'll do it first because if it fails, we'll try again with full presentation.
            if (!_firstFrame)
            {
                hr = _dxgiSwapChain->Present1(1, 0, &_presentParams);

                // These two error codes are indicated for destroy-and-recreate
                // If we were told to destroy-and-recreate, we're going to skip straight into doing that
                // and not try again with full presentation.
                recreate = hr == DXGI_ERROR_DEVICE_REMOVED || hr == DXGI_ERROR_DEVICE_RESET;

                // Log this as we actually don't expect it to happen, we just will try again
                // below for robustness of our drawing.
                if (FAILED(hr) && !recreate)
                {
                    LOG_HR(hr);
                }
            }

            // If it's the first frame through, we cannot do partial presentation.
            // Also if partial presentation failed above and we weren't told to skip straight to
            // device recreation.
            // In both of these circumstances, do a full presentation.
            if (_firstFrame || (FAILED(hr) && !recreate))
            {
                hr = _dxgiSwapChain->Present(1, 0);
                _firstFrame = false;

                // These two error codes are indicated for destroy-and-recreate
                recreate = hr == DXGI_ERROR_DEVICE_REMOVED || hr == DXGI_ERROR_DEVICE_RESET;
            }

            // Now check for failure cases from either presentation mode.
            if (FAILED(hr))
            {
                // If we were told to recreate the device surface, do that.
                if (recreate)
                {
                    // We don't need to end painting here, as the renderer has done it for us.
                    _ReleaseDeviceResources();
                    FAIL_FAST_IF_FAILED(InvalidateAll());
                    return E_PENDING; // Indicate a retry to the renderer.
                }
                // Otherwise, we don't know what to do with this error. Report it.
                else
                {
                    FAIL_FAST_HR(hr);
                }
            }

            // Finally copy the front image (being presented now) onto the backing buffer
            // (where we are about to draw the next frame) so we can draw only the differences
            // next frame.
            RETURN_IF_FAILED(_CopyFrontToBack());
            _presentReady = false;

            // Clear out the consumed presentation parameters so stale dirty
            // rects / scroll info can't leak into the next frame.
            _presentDirty.clear();
            _presentOffset = { 0 };
            _presentScroll = { 0 };
            _presentParams = { 0 };
        }
        CATCH_RETURN();
    }

    return S_OK;
}
|
|
|
|
|
|
|
|
// Routine Description:
|
|
|
|
// - This is currently unused.
|
|
|
|
// Arguments:
|
|
|
|
// - <none>
|
|
|
|
// Return Value:
|
|
|
|
// - S_OK
|
2019-06-11 22:27:09 +02:00
|
|
|
[[nodiscard]] HRESULT DxEngine::ScrollFrame() noexcept
{
    // Intentional no-op: this callback is currently unused by the DX engine
    // (see routine description above), so there is nothing to do here.
    return S_OK;
}
|
|
|
|
|
|
|
|
// Routine Description:
|
|
|
|
// - This paints in the back most layer of the frame with the background color.
|
|
|
|
// Arguments:
|
|
|
|
// - <none>
|
|
|
|
// Return Value:
|
|
|
|
// - S_OK
|
2019-06-11 22:27:09 +02:00
|
|
|
[[nodiscard]] HRESULT DxEngine::PaintBackground() noexcept
|
2020-04-13 22:09:02 +02:00
|
|
|
try
|
2019-05-03 00:29:04 +02:00
|
|
|
{
|
2020-05-11 22:54:29 +02:00
|
|
|
D2D1_COLOR_F nothing{ 0 };
|
|
|
|
if (_chainMode == SwapChainMode::ForHwnd)
|
|
|
|
{
|
|
|
|
// When we're drawing over an HWND target, we need to fully paint the background color.
|
|
|
|
nothing = _backgroundColor;
|
|
|
|
}
|
2019-05-03 00:29:04 +02:00
|
|
|
|
2020-04-13 22:09:02 +02:00
|
|
|
// If the entire thing is invalid, just use one big clear operation.
|
|
|
|
if (_invalidMap.all())
|
|
|
|
{
|
2020-06-20 00:14:01 +02:00
|
|
|
_d2dDeviceContext->Clear(nothing);
|
2020-04-13 22:09:02 +02:00
|
|
|
}
|
|
|
|
else
|
|
|
|
{
|
|
|
|
// Runs are counts of cells.
|
|
|
|
// Use a transform by the size of one cell to convert cells-to-pixels
|
|
|
|
// as we clear.
|
2020-06-20 00:14:01 +02:00
|
|
|
_d2dDeviceContext->SetTransform(D2D1::Matrix3x2F::Scale(_glyphCell));
|
2020-08-18 18:59:31 +02:00
|
|
|
for (const auto& rect : _invalidMap.runs())
|
2020-04-13 22:09:02 +02:00
|
|
|
{
|
|
|
|
// Use aliased.
|
|
|
|
// For graphics reasons, it'll look better because it will ensure that
|
|
|
|
// the edges are cut nice and sharp (not blended by anti-aliasing).
|
|
|
|
// For performance reasons, it takes a lot less work to not
|
|
|
|
// do anti-alias blending.
|
2020-06-20 00:14:01 +02:00
|
|
|
_d2dDeviceContext->PushAxisAlignedClip(rect, D2D1_ANTIALIAS_MODE_ALIASED);
|
|
|
|
_d2dDeviceContext->Clear(nothing);
|
|
|
|
_d2dDeviceContext->PopAxisAlignedClip();
|
2020-04-13 22:09:02 +02:00
|
|
|
}
|
2020-06-20 00:14:01 +02:00
|
|
|
_d2dDeviceContext->SetTransform(D2D1::Matrix3x2F::Identity());
|
2019-10-11 23:02:09 +02:00
|
|
|
}
|
2019-05-03 00:29:04 +02:00
|
|
|
|
|
|
|
return S_OK;
|
|
|
|
}
|
2020-04-13 22:09:02 +02:00
|
|
|
CATCH_RETURN()
|
2019-05-03 00:29:04 +02:00
|
|
|
|
|
|
|
// Routine Description:
|
|
|
|
// - Places one line of text onto the screen at the given position
|
|
|
|
// Arguments:
|
|
|
|
// - clusters - Iterable collection of cluster information (text and columns it should consume)
|
|
|
|
// - coord - Character coordinate position in the cell grid
|
|
|
|
// - fTrimLeft - Whether or not to trim off the left half of a double wide character
|
|
|
|
// Return Value:
|
|
|
|
// - S_OK or relevant DirectX error
|
2020-07-15 18:40:42 +02:00
|
|
|
[[nodiscard]] HRESULT DxEngine::PaintBufferLine(gsl::span<const Cluster> const clusters,
                                                COORD const coord,
                                                const bool /*trimLeft*/,
                                                const bool /*lineWrapped*/) noexcept
try
{
    // Calculate positioning of our origin: scale the cell coordinate by the
    // size of one glyph cell to convert it into a pixel position.
    const D2D1_POINT_2F origin = til::point{ coord } * _glyphCell;

    // Create the text layout: reset any state from the previously painted
    // line, then append the clusters (text and the columns it should consume).
    RETURN_IF_FAILED(_customLayout->Reset());
    RETURN_IF_FAILED(_customLayout->AppendClusters(clusters));

    // Layout then render the text via the custom renderer at the computed origin.
    RETURN_IF_FAILED(_customLayout->Draw(_drawingContext.get(), _customRenderer.Get(), origin.x, origin.y));

    return S_OK;
}
CATCH_RETURN()
|
2019-05-03 00:29:04 +02:00
|
|
|
|
|
|
|
// Routine Description:
|
|
|
|
// - Paints lines around cells (draws in pieces of the grid)
|
|
|
|
// Arguments:
|
|
|
|
// - lines - Which grid lines (top, left, bottom, right) to draw
|
|
|
|
// - color - The color to use for drawing the lines
|
|
|
|
// - cchLine - Length of the line to draw in character cells
|
|
|
|
// - coordTarget - The X,Y character position in the grid where we should start drawing
|
|
|
|
// - We will draw rightward (+X) from here
|
|
|
|
// Return Value:
|
|
|
|
// - S_OK or relevant DirectX error
|
2019-06-11 22:27:09 +02:00
|
|
|
[[nodiscard]] HRESULT DxEngine::PaintBufferGridLines(GridLines const lines,
|
|
|
|
COLORREF const color,
|
|
|
|
size_t const cchLine,
|
|
|
|
COORD const coordTarget) noexcept
|
2020-04-13 22:09:02 +02:00
|
|
|
try
|
2019-05-03 00:29:04 +02:00
|
|
|
{
|
|
|
|
const auto existingColor = _d2dBrushForeground->GetColor();
|
2019-08-30 00:23:07 +02:00
|
|
|
const auto restoreBrushOnExit = wil::scope_exit([&]() noexcept { _d2dBrushForeground->SetColor(existingColor); });
|
2019-05-03 00:29:04 +02:00
|
|
|
|
2019-08-21 01:14:26 +02:00
|
|
|
_d2dBrushForeground->SetColor(_ColorFFromColorRef(color));
|
2019-05-03 00:29:04 +02:00
|
|
|
|
Refactor grid line renderers with support for more line types (#7107)
This is a refactoring of the grid line renderers, adjusting the line
widths to scale with the font size, and optimising the implementation to
cut down on the number of draw calls. It also extends the supported grid
line types to include true underlines and strike-through lines in the
style of the active font.
The main gist of the optimisation was to render the horizontal lines
with a single draw call, instead of a loop with lots of little strokes
joined together. In the case of the vertical lines, which still needed
to be handled in a loop, I've tried to move the majority of static
calculations outside the loop, so there is bit of optimisation there
too.
At the same time this code was updated to support a variable stroke
width for the lines, instead of having them hardcoded to 1 pixel. The
width is now calculated as a fraction of the font size (0.025 "em"),
which is still going to be 1 pixel wide in most typical usage, but will
scale up appropriately if you zoom in far enough.
And in preparation for supporting the SGR strike-through attribute, and
true underlines, I've extended the grid line renders with options for
handling those line types as well. The offset and thickness of the lines
is obtained from the font metrics (rounded to a pixel width, with a
minimum of one pixel), so they match the style of the font.
VALIDATION
For now we're still only rendering grid lines, and only the top and
bottom lines in the case of the DirectX renderer in Windows Terminal. So
to test, I hacked in some code to force the renderer to use all the
different options, confirming that they were working in both the GDI and
DirectX renderers.
I've tested the output with a number of different fonts, comparing it
with the same text rendered in WordPad. For the most part they match
exactly, but there can be slight differences when we adjust the font
size for grid alignment. And in the case of the GDI renderer, where
we're working with pixel heights rather than points, it's difficult to
match the sizes exactly.
This is a first step towards supporting the strike-through attribute
(#6205) and true underlines (#2915).
Closes #6911
2020-07-31 00:43:37 +02:00
|
|
|
const D2D1_SIZE_F font = _glyphCell;
|
|
|
|
const D2D_POINT_2F target = { coordTarget.X * font.width, coordTarget.Y * font.height };
|
|
|
|
const auto fullRunWidth = font.width * gsl::narrow_cast<unsigned>(cchLine);
|
2019-05-03 00:29:04 +02:00
|
|
|
|
2020-08-25 19:15:43 +02:00
|
|
|
const auto DrawLine = [=](const auto x0, const auto y0, const auto x1, const auto y1, const auto strokeWidth) noexcept {
|
Refactor grid line renderers with support for more line types (#7107)
This is a refactoring of the grid line renderers, adjusting the line
widths to scale with the font size, and optimising the implementation to
cut down on the number of draw calls. It also extends the supported grid
line types to include true underlines and strike-through lines in the
style of the active font.
The main gist of the optimisation was to render the horizontal lines
with a single draw call, instead of a loop with lots of little strokes
joined together. In the case of the vertical lines, which still needed
to be handled in a loop, I've tried to move the majority of static
calculations outside the loop, so there is bit of optimisation there
too.
At the same time this code was updated to support a variable stroke
width for the lines, instead of having them hardcoded to 1 pixel. The
width is now calculated as a fraction of the font size (0.025 "em"),
which is still going to be 1 pixel wide in most typical usage, but will
scale up appropriately if you zoom in far enough.
And in preparation for supporting the SGR strike-through attribute, and
true underlines, I've extended the grid line renders with options for
handling those line types as well. The offset and thickness of the lines
is obtained from the font metrics (rounded to a pixel width, with a
minimum of one pixel), so they match the style of the font.
VALIDATION
For now we're still only rendering grid lines, and only the top and
bottom lines in the case of the DirectX renderer in Windows Terminal. So
to test, I hacked in some code to force the renderer to use all the
different options, confirming that they were working in both the GDI and
DirectX renderers.
I've tested the output with a number of different fonts, comparing it
with the same text rendered in WordPad. For the most part they match
exactly, but there can be slight differences when we adjust the font
size for grid alignment. And in the case of the GDI renderer, where
we're working with pixel heights rather than points, it's difficult to
match the sizes exactly.
This is a first step towards supporting the strike-through attribute
(#6205) and true underlines (#2915).
Closes #6911
2020-07-31 00:43:37 +02:00
|
|
|
_d2dDeviceContext->DrawLine({ x0, y0 }, { x1, y1 }, _d2dBrushForeground.Get(), strokeWidth, _strokeStyle.Get());
|
|
|
|
};
|
2019-05-03 00:29:04 +02:00
|
|
|
|
Refactor grid line renderers with support for more line types (#7107)
This is a refactoring of the grid line renderers, adjusting the line
widths to scale with the font size, and optimising the implementation to
cut down on the number of draw calls. It also extends the supported grid
line types to include true underlines and strike-through lines in the
style of the active font.
The main gist of the optimisation was to render the horizontal lines
with a single draw call, instead of a loop with lots of little strokes
joined together. In the case of the vertical lines, which still needed
to be handled in a loop, I've tried to move the majority of static
calculations outside the loop, so there is bit of optimisation there
too.
At the same time this code was updated to support a variable stroke
width for the lines, instead of having them hardcoded to 1 pixel. The
width is now calculated as a fraction of the font size (0.025 "em"),
which is still going to be 1 pixel wide in most typical usage, but will
scale up appropriately if you zoom in far enough.
And in preparation for supporting the SGR strike-through attribute, and
true underlines, I've extended the grid line renders with options for
handling those line types as well. The offset and thickness of the lines
is obtained from the font metrics (rounded to a pixel width, with a
minimum of one pixel), so they match the style of the font.
VALIDATION
For now we're still only rendering grid lines, and only the top and
bottom lines in the case of the DirectX renderer in Windows Terminal. So
to test, I hacked in some code to force the renderer to use all the
different options, confirming that they were working in both the GDI and
DirectX renderers.
I've tested the output with a number of different fonts, comparing it
with the same text rendered in WordPad. For the most part they match
exactly, but there can be slight differences when we adjust the font
size for grid alignment. And in the case of the GDI renderer, where
we're working with pixel heights rather than points, it's difficult to
match the sizes exactly.
This is a first step towards supporting the strike-through attribute
(#6205) and true underlines (#2915).
Closes #6911
2020-07-31 00:43:37 +02:00
|
|
|
// NOTE: Line coordinates are centered within the line, so they need to be
|
|
|
|
// offset by half the stroke width. For the start coordinate we add half
|
|
|
|
// the stroke width, and for the end coordinate we subtract half the width.
|
2019-05-03 00:29:04 +02:00
|
|
|
|
Refactor grid line renderers with support for more line types (#7107)
This is a refactoring of the grid line renderers, adjusting the line
widths to scale with the font size, and optimising the implementation to
cut down on the number of draw calls. It also extends the supported grid
line types to include true underlines and strike-through lines in the
style of the active font.
The main gist of the optimisation was to render the horizontal lines
with a single draw call, instead of a loop with lots of little strokes
joined together. In the case of the vertical lines, which still needed
to be handled in a loop, I've tried to move the majority of static
calculations outside the loop, so there is bit of optimisation there
too.
At the same time this code was updated to support a variable stroke
width for the lines, instead of having them hardcoded to 1 pixel. The
width is now calculated as a fraction of the font size (0.025 "em"),
which is still going to be 1 pixel wide in most typical usage, but will
scale up appropriately if you zoom in far enough.
And in preparation for supporting the SGR strike-through attribute, and
true underlines, I've extended the grid line renders with options for
handling those line types as well. The offset and thickness of the lines
is obtained from the font metrics (rounded to a pixel width, with a
minimum of one pixel), so they match the style of the font.
VALIDATION
For now we're still only rendering grid lines, and only the top and
bottom lines in the case of the DirectX renderer in Windows Terminal. So
to test, I hacked in some code to force the renderer to use all the
different options, confirming that they were working in both the GDI and
DirectX renderers.
I've tested the output with a number of different fonts, comparing it
with the same text rendered in WordPad. For the most part they match
exactly, but there can be slight differences when we adjust the font
size for grid alignment. And in the case of the GDI renderer, where
we're working with pixel heights rather than points, it's difficult to
match the sizes exactly.
This is a first step towards supporting the strike-through attribute
(#6205) and true underlines (#2915).
Closes #6911
2020-07-31 00:43:37 +02:00
|
|
|
if (lines & (GridLines::Left | GridLines::Right))
|
|
|
|
{
|
|
|
|
const auto halfGridlineWidth = _lineMetrics.gridlineWidth / 2.0f;
|
|
|
|
const auto startY = target.y + halfGridlineWidth;
|
|
|
|
const auto endY = target.y + font.height - halfGridlineWidth;
|
2019-05-03 00:29:04 +02:00
|
|
|
|
|
|
|
if (lines & GridLines::Left)
|
|
|
|
{
|
Refactor grid line renderers with support for more line types (#7107)
This is a refactoring of the grid line renderers, adjusting the line
widths to scale with the font size, and optimising the implementation to
cut down on the number of draw calls. It also extends the supported grid
line types to include true underlines and strike-through lines in the
style of the active font.
The main gist of the optimisation was to render the horizontal lines
with a single draw call, instead of a loop with lots of little strokes
joined together. In the case of the vertical lines, which still needed
to be handled in a loop, I've tried to move the majority of static
calculations outside the loop, so there is bit of optimisation there
too.
At the same time this code was updated to support a variable stroke
width for the lines, instead of having them hardcoded to 1 pixel. The
width is now calculated as a fraction of the font size (0.025 "em"),
which is still going to be 1 pixel wide in most typical usage, but will
scale up appropriately if you zoom in far enough.
And in preparation for supporting the SGR strike-through attribute, and
true underlines, I've extended the grid line renders with options for
handling those line types as well. The offset and thickness of the lines
is obtained from the font metrics (rounded to a pixel width, with a
minimum of one pixel), so they match the style of the font.
VALIDATION
For now we're still only rendering grid lines, and only the top and
bottom lines in the case of the DirectX renderer in Windows Terminal. So
to test, I hacked in some code to force the renderer to use all the
different options, confirming that they were working in both the GDI and
DirectX renderers.
I've tested the output with a number of different fonts, comparing it
with the same text rendered in WordPad. For the most part they match
exactly, but there can be slight differences when we adjust the font
size for grid alignment. And in the case of the GDI renderer, where
we're working with pixel heights rather than points, it's difficult to
match the sizes exactly.
This is a first step towards supporting the strike-through attribute
(#6205) and true underlines (#2915).
Closes #6911
2020-07-31 00:43:37 +02:00
|
|
|
auto x = target.x + halfGridlineWidth;
|
|
|
|
for (size_t i = 0; i < cchLine; i++, x += font.width)
|
|
|
|
{
|
|
|
|
DrawLine(x, startY, x, endY, _lineMetrics.gridlineWidth);
|
|
|
|
}
|
|
|
|
}
|
2019-05-03 00:29:04 +02:00
|
|
|
|
Refactor grid line renderers with support for more line types (#7107)
This is a refactoring of the grid line renderers, adjusting the line
widths to scale with the font size, and optimising the implementation to
cut down on the number of draw calls. It also extends the supported grid
line types to include true underlines and strike-through lines in the
style of the active font.
The main gist of the optimisation was to render the horizontal lines
with a single draw call, instead of a loop with lots of little strokes
joined together. In the case of the vertical lines, which still needed
to be handled in a loop, I've tried to move the majority of static
calculations outside the loop, so there is bit of optimisation there
too.
At the same time this code was updated to support a variable stroke
width for the lines, instead of having them hardcoded to 1 pixel. The
width is now calculated as a fraction of the font size (0.025 "em"),
which is still going to be 1 pixel wide in most typical usage, but will
scale up appropriately if you zoom in far enough.
And in preparation for supporting the SGR strike-through attribute, and
true underlines, I've extended the grid line renders with options for
handling those line types as well. The offset and thickness of the lines
is obtained from the font metrics (rounded to a pixel width, with a
minimum of one pixel), so they match the style of the font.
VALIDATION
For now we're still only rendering grid lines, and only the top and
bottom lines in the case of the DirectX renderer in Windows Terminal. So
to test, I hacked in some code to force the renderer to use all the
different options, confirming that they were working in both the GDI and
DirectX renderers.
I've tested the output with a number of different fonts, comparing it
with the same text rendered in WordPad. For the most part they match
exactly, but there can be slight differences when we adjust the font
size for grid alignment. And in the case of the GDI renderer, where
we're working with pixel heights rather than points, it's difficult to
match the sizes exactly.
This is a first step towards supporting the strike-through attribute
(#6205) and true underlines (#2915).
Closes #6911
2020-07-31 00:43:37 +02:00
|
|
|
if (lines & GridLines::Right)
|
|
|
|
{
|
|
|
|
auto x = target.x + font.width - halfGridlineWidth;
|
|
|
|
for (size_t i = 0; i < cchLine; i++, x += font.width)
|
|
|
|
{
|
|
|
|
DrawLine(x, startY, x, endY, _lineMetrics.gridlineWidth);
|
|
|
|
}
|
2019-05-03 00:29:04 +02:00
|
|
|
}
|
Refactor grid line renderers with support for more line types (#7107)
This is a refactoring of the grid line renderers, adjusting the line
widths to scale with the font size, and optimising the implementation to
cut down on the number of draw calls. It also extends the supported grid
line types to include true underlines and strike-through lines in the
style of the active font.
The main gist of the optimisation was to render the horizontal lines
with a single draw call, instead of a loop with lots of little strokes
joined together. In the case of the vertical lines, which still needed
to be handled in a loop, I've tried to move the majority of static
calculations outside the loop, so there is bit of optimisation there
too.
At the same time this code was updated to support a variable stroke
width for the lines, instead of having them hardcoded to 1 pixel. The
width is now calculated as a fraction of the font size (0.025 "em"),
which is still going to be 1 pixel wide in most typical usage, but will
scale up appropriately if you zoom in far enough.
And in preparation for supporting the SGR strike-through attribute, and
true underlines, I've extended the grid line renders with options for
handling those line types as well. The offset and thickness of the lines
is obtained from the font metrics (rounded to a pixel width, with a
minimum of one pixel), so they match the style of the font.
VALIDATION
For now we're still only rendering grid lines, and only the top and
bottom lines in the case of the DirectX renderer in Windows Terminal. So
to test, I hacked in some code to force the renderer to use all the
different options, confirming that they were working in both the GDI and
DirectX renderers.
I've tested the output with a number of different fonts, comparing it
with the same text rendered in WordPad. For the most part they match
exactly, but there can be slight differences when we adjust the font
size for grid alignment. And in the case of the GDI renderer, where
we're working with pixel heights rather than points, it's difficult to
match the sizes exactly.
This is a first step towards supporting the strike-through attribute
(#6205) and true underlines (#2915).
Closes #6911
2020-07-31 00:43:37 +02:00
|
|
|
}
|
2019-05-03 00:29:04 +02:00
|
|
|
|
Refactor grid line renderers with support for more line types (#7107)
This is a refactoring of the grid line renderers, adjusting the line
widths to scale with the font size, and optimising the implementation to
cut down on the number of draw calls. It also extends the supported grid
line types to include true underlines and strike-through lines in the
style of the active font.
The main gist of the optimisation was to render the horizontal lines
with a single draw call, instead of a loop with lots of little strokes
joined together. In the case of the vertical lines, which still needed
to be handled in a loop, I've tried to move the majority of static
calculations outside the loop, so there is bit of optimisation there
too.
At the same time this code was updated to support a variable stroke
width for the lines, instead of having them hardcoded to 1 pixel. The
width is now calculated as a fraction of the font size (0.025 "em"),
which is still going to be 1 pixel wide in most typical usage, but will
scale up appropriately if you zoom in far enough.
And in preparation for supporting the SGR strike-through attribute, and
true underlines, I've extended the grid line renders with options for
handling those line types as well. The offset and thickness of the lines
is obtained from the font metrics (rounded to a pixel width, with a
minimum of one pixel), so they match the style of the font.
VALIDATION
For now we're still only rendering grid lines, and only the top and
bottom lines in the case of the DirectX renderer in Windows Terminal. So
to test, I hacked in some code to force the renderer to use all the
different options, confirming that they were working in both the GDI and
DirectX renderers.
I've tested the output with a number of different fonts, comparing it
with the same text rendered in WordPad. For the most part they match
exactly, but there can be slight differences when we adjust the font
size for grid alignment. And in the case of the GDI renderer, where
we're working with pixel heights rather than points, it's difficult to
match the sizes exactly.
This is a first step towards supporting the strike-through attribute
(#6205) and true underlines (#2915).
Closes #6911
2020-07-31 00:43:37 +02:00
|
|
|
if (lines & (GridLines::Top | GridLines::Bottom))
|
|
|
|
{
|
|
|
|
const auto halfGridlineWidth = _lineMetrics.gridlineWidth / 2.0f;
|
|
|
|
const auto startX = target.x + halfGridlineWidth;
|
|
|
|
const auto endX = target.x + fullRunWidth - halfGridlineWidth;
|
2019-05-03 00:29:04 +02:00
|
|
|
|
Refactor grid line renderers with support for more line types (#7107)
This is a refactoring of the grid line renderers, adjusting the line
widths to scale with the font size, and optimising the implementation to
cut down on the number of draw calls. It also extends the supported grid
line types to include true underlines and strike-through lines in the
style of the active font.
The main gist of the optimisation was to render the horizontal lines
with a single draw call, instead of a loop with lots of little strokes
joined together. In the case of the vertical lines, which still needed
to be handled in a loop, I've tried to move the majority of static
calculations outside the loop, so there is bit of optimisation there
too.
At the same time this code was updated to support a variable stroke
width for the lines, instead of having them hardcoded to 1 pixel. The
width is now calculated as a fraction of the font size (0.025 "em"),
which is still going to be 1 pixel wide in most typical usage, but will
scale up appropriately if you zoom in far enough.
And in preparation for supporting the SGR strike-through attribute, and
true underlines, I've extended the grid line renders with options for
handling those line types as well. The offset and thickness of the lines
is obtained from the font metrics (rounded to a pixel width, with a
minimum of one pixel), so they match the style of the font.
VALIDATION
For now we're still only rendering grid lines, and only the top and
bottom lines in the case of the DirectX renderer in Windows Terminal. So
to test, I hacked in some code to force the renderer to use all the
different options, confirming that they were working in both the GDI and
DirectX renderers.
I've tested the output with a number of different fonts, comparing it
with the same text rendered in WordPad. For the most part they match
exactly, but there can be slight differences when we adjust the font
size for grid alignment. And in the case of the GDI renderer, where
we're working with pixel heights rather than points, it's difficult to
match the sizes exactly.
This is a first step towards supporting the strike-through attribute
(#6205) and true underlines (#2915).
Closes #6911
2020-07-31 00:43:37 +02:00
|
|
|
if (lines & GridLines::Top)
|
|
|
|
{
|
|
|
|
const auto y = target.y + halfGridlineWidth;
|
|
|
|
DrawLine(startX, y, endX, y, _lineMetrics.gridlineWidth);
|
|
|
|
}
|
2019-05-03 00:29:04 +02:00
|
|
|
|
|
|
|
if (lines & GridLines::Bottom)
|
|
|
|
{
|
Refactor grid line renderers with support for more line types (#7107)
This is a refactoring of the grid line renderers, adjusting the line
widths to scale with the font size, and optimising the implementation to
cut down on the number of draw calls. It also extends the supported grid
line types to include true underlines and strike-through lines in the
style of the active font.
The main gist of the optimisation was to render the horizontal lines
with a single draw call, instead of a loop with lots of little strokes
joined together. In the case of the vertical lines, which still needed
to be handled in a loop, I've tried to move the majority of static
calculations outside the loop, so there is bit of optimisation there
too.
At the same time this code was updated to support a variable stroke
width for the lines, instead of having them hardcoded to 1 pixel. The
width is now calculated as a fraction of the font size (0.025 "em"),
which is still going to be 1 pixel wide in most typical usage, but will
scale up appropriately if you zoom in far enough.
And in preparation for supporting the SGR strike-through attribute, and
true underlines, I've extended the grid line renders with options for
handling those line types as well. The offset and thickness of the lines
is obtained from the font metrics (rounded to a pixel width, with a
minimum of one pixel), so they match the style of the font.
VALIDATION
For now we're still only rendering grid lines, and only the top and
bottom lines in the case of the DirectX renderer in Windows Terminal. So
to test, I hacked in some code to force the renderer to use all the
different options, confirming that they were working in both the GDI and
DirectX renderers.
I've tested the output with a number of different fonts, comparing it
with the same text rendered in WordPad. For the most part they match
exactly, but there can be slight differences when we adjust the font
size for grid alignment. And in the case of the GDI renderer, where
we're working with pixel heights rather than points, it's difficult to
match the sizes exactly.
This is a first step towards supporting the strike-through attribute
(#6205) and true underlines (#2915).
Closes #6911
2020-07-31 00:43:37 +02:00
|
|
|
const auto y = target.y + font.height - halfGridlineWidth;
|
|
|
|
DrawLine(startX, y, endX, y, _lineMetrics.gridlineWidth);
|
2019-05-03 00:29:04 +02:00
|
|
|
}
|
Refactor grid line renderers with support for more line types (#7107)
This is a refactoring of the grid line renderers, adjusting the line
widths to scale with the font size, and optimising the implementation to
cut down on the number of draw calls. It also extends the supported grid
line types to include true underlines and strike-through lines in the
style of the active font.
The main gist of the optimisation was to render the horizontal lines
with a single draw call, instead of a loop with lots of little strokes
joined together. In the case of the vertical lines, which still needed
to be handled in a loop, I've tried to move the majority of static
calculations outside the loop, so there is bit of optimisation there
too.
At the same time this code was updated to support a variable stroke
width for the lines, instead of having them hardcoded to 1 pixel. The
width is now calculated as a fraction of the font size (0.025 "em"),
which is still going to be 1 pixel wide in most typical usage, but will
scale up appropriately if you zoom in far enough.
And in preparation for supporting the SGR strike-through attribute, and
true underlines, I've extended the grid line renders with options for
handling those line types as well. The offset and thickness of the lines
is obtained from the font metrics (rounded to a pixel width, with a
minimum of one pixel), so they match the style of the font.
VALIDATION
For now we're still only rendering grid lines, and only the top and
bottom lines in the case of the DirectX renderer in Windows Terminal. So
to test, I hacked in some code to force the renderer to use all the
different options, confirming that they were working in both the GDI and
DirectX renderers.
I've tested the output with a number of different fonts, comparing it
with the same text rendered in WordPad. For the most part they match
exactly, but there can be slight differences when we adjust the font
size for grid alignment. And in the case of the GDI renderer, where
we're working with pixel heights rather than points, it's difficult to
match the sizes exactly.
This is a first step towards supporting the strike-through attribute
(#6205) and true underlines (#2915).
Closes #6911
2020-07-31 00:43:37 +02:00
|
|
|
}
|
2019-05-03 00:29:04 +02:00
|
|
|
|
Refactor grid line renderers with support for more line types (#7107)
This is a refactoring of the grid line renderers, adjusting the line
widths to scale with the font size, and optimising the implementation to
cut down on the number of draw calls. It also extends the supported grid
line types to include true underlines and strike-through lines in the
style of the active font.
The main gist of the optimisation was to render the horizontal lines
with a single draw call, instead of a loop with lots of little strokes
joined together. In the case of the vertical lines, which still needed
to be handled in a loop, I've tried to move the majority of static
calculations outside the loop, so there is bit of optimisation there
too.
At the same time this code was updated to support a variable stroke
width for the lines, instead of having them hardcoded to 1 pixel. The
width is now calculated as a fraction of the font size (0.025 "em"),
which is still going to be 1 pixel wide in most typical usage, but will
scale up appropriately if you zoom in far enough.
And in preparation for supporting the SGR strike-through attribute, and
true underlines, I've extended the grid line renders with options for
handling those line types as well. The offset and thickness of the lines
is obtained from the font metrics (rounded to a pixel width, with a
minimum of one pixel), so they match the style of the font.
VALIDATION
For now we're still only rendering grid lines, and only the top and
bottom lines in the case of the DirectX renderer in Windows Terminal. So
to test, I hacked in some code to force the renderer to use all the
different options, confirming that they were working in both the GDI and
DirectX renderers.
I've tested the output with a number of different fonts, comparing it
with the same text rendered in WordPad. For the most part they match
exactly, but there can be slight differences when we adjust the font
size for grid alignment. And in the case of the GDI renderer, where
we're working with pixel heights rather than points, it's difficult to
match the sizes exactly.
This is a first step towards supporting the strike-through attribute
(#6205) and true underlines (#2915).
Closes #6911
2020-07-31 00:43:37 +02:00
|
|
|
// In the case of the underline and strikethrough offsets, the stroke width
|
|
|
|
// is already accounted for, so they don't require further adjustments.
|
2019-05-03 00:29:04 +02:00
|
|
|
|
Add support for the "doubly underlined" graphic rendition attribute (#7223)
This PR adds support for the ANSI _doubly underlined_ graphic rendition
attribute, which is enabled by the `SGR 21` escape sequence.
There was already an `ExtendedAttributes::DoublyUnderlined` flag in the
`TextAttribute` class, but I needed to add `SetDoublyUnderlined` and
`IsDoublyUnderlined` methods to access that flag, and update the
`SetGraphicsRendition` methods of the two dispatchers to set the
attribute on receipt of the `SGR 21` sequence. I also had to update the
existing `SGR 24` handler to reset _DoublyUnderlined_ in addition to
_Underlined_, since they share the same reset sequence.
For the rendering, I've added a new grid line type, which essentially
just draws an additional line with the same thickness as the regular
underline, but slightly below it - I found a gap of around 0.05 "em"
between the lines looked best. If there isn't enough space in the cell
for that gap, the second line will be clamped to overlap the first, so
you then just get a thicker line. If there isn't even enough space below
for a thicker line, we move the offset _above_ the first line, but just
enough to make it thicker.
The only other complication was the update of the `Xterm256Engine` in
the VT renderer. As mentioned above, the two underline attributes share
the same reset sequence, so to forward that state over conpty we require
a slightly more complicated process than with most other attributes
(similar to _Bold_ and _Faint_). We first check whether either underline
attribute needs to be turned off to send the reset sequence, and then
check individually if each of them needs to be turned back on again.
## Validation Steps Performed
For testing, I've extended the existing attribute tests in
`AdapterTest`, `VTRendererTest`, and `ScreenBufferTests`, to make sure
we're covering both the _Underlined_ and _DoublyUnderlined_ attributes.
I've also manually tested the `SGR 21` sequence in conhost and Windows
Terminal, with a variety of fonts and font sizes, to make sure the
rendering was reasonably distinguishable from a single underline.
Closes #2916
2020-08-10 19:06:16 +02:00
|
|
|
if (lines & (GridLines::Underline | GridLines::DoubleUnderline))
|
Refactor grid line renderers with support for more line types (#7107)
This is a refactoring of the grid line renderers, adjusting the line
widths to scale with the font size, and optimising the implementation to
cut down on the number of draw calls. It also extends the supported grid
line types to include true underlines and strike-through lines in the
style of the active font.
The main gist of the optimisation was to render the horizontal lines
with a single draw call, instead of a loop with lots of little strokes
joined together. In the case of the vertical lines, which still needed
to be handled in a loop, I've tried to move the majority of static
calculations outside the loop, so there is bit of optimisation there
too.
At the same time this code was updated to support a variable stroke
width for the lines, instead of having them hardcoded to 1 pixel. The
width is now calculated as a fraction of the font size (0.025 "em"),
which is still going to be 1 pixel wide in most typical usage, but will
scale up appropriately if you zoom in far enough.
And in preparation for supporting the SGR strike-through attribute, and
true underlines, I've extended the grid line renders with options for
handling those line types as well. The offset and thickness of the lines
is obtained from the font metrics (rounded to a pixel width, with a
minimum of one pixel), so they match the style of the font.
VALIDATION
For now we're still only rendering grid lines, and only the top and
bottom lines in the case of the DirectX renderer in Windows Terminal. So
to test, I hacked in some code to force the renderer to use all the
different options, confirming that they were working in both the GDI and
DirectX renderers.
I've tested the output with a number of different fonts, comparing it
with the same text rendered in WordPad. For the most part they match
exactly, but there can be slight differences when we adjust the font
size for grid alignment. And in the case of the GDI renderer, where
we're working with pixel heights rather than points, it's difficult to
match the sizes exactly.
This is a first step towards supporting the strike-through attribute
(#6205) and true underlines (#2915).
Closes #6911
2020-07-31 00:43:37 +02:00
|
|
|
{
|
|
|
|
const auto halfUnderlineWidth = _lineMetrics.underlineWidth / 2.0f;
|
|
|
|
const auto startX = target.x + halfUnderlineWidth;
|
|
|
|
const auto endX = target.x + fullRunWidth - halfUnderlineWidth;
|
|
|
|
const auto y = target.y + _lineMetrics.underlineOffset;
|
2019-05-03 00:29:04 +02:00
|
|
|
|
Refactor grid line renderers with support for more line types (#7107)
This is a refactoring of the grid line renderers, adjusting the line
widths to scale with the font size, and optimising the implementation to
cut down on the number of draw calls. It also extends the supported grid
line types to include true underlines and strike-through lines in the
style of the active font.
The main gist of the optimisation was to render the horizontal lines
with a single draw call, instead of a loop with lots of little strokes
joined together. In the case of the vertical lines, which still needed
to be handled in a loop, I've tried to move the majority of static
calculations outside the loop, so there is bit of optimisation there
too.
At the same time this code was updated to support a variable stroke
width for the lines, instead of having them hardcoded to 1 pixel. The
width is now calculated as a fraction of the font size (0.025 "em"),
which is still going to be 1 pixel wide in most typical usage, but will
scale up appropriately if you zoom in far enough.
And in preparation for supporting the SGR strike-through attribute, and
true underlines, I've extended the grid line renders with options for
handling those line types as well. The offset and thickness of the lines
is obtained from the font metrics (rounded to a pixel width, with a
minimum of one pixel), so they match the style of the font.
VALIDATION
For now we're still only rendering grid lines, and only the top and
bottom lines in the case of the DirectX renderer in Windows Terminal. So
to test, I hacked in some code to force the renderer to use all the
different options, confirming that they were working in both the GDI and
DirectX renderers.
I've tested the output with a number of different fonts, comparing it
with the same text rendered in WordPad. For the most part they match
exactly, but there can be slight differences when we adjust the font
size for grid alignment. And in the case of the GDI renderer, where
we're working with pixel heights rather than points, it's difficult to
match the sizes exactly.
This is a first step towards supporting the strike-through attribute
(#6205) and true underlines (#2915).
Closes #6911
2020-07-31 00:43:37 +02:00
|
|
|
DrawLine(startX, y, endX, y, _lineMetrics.underlineWidth);
|
Add support for the "doubly underlined" graphic rendition attribute (#7223)
This PR adds support for the ANSI _doubly underlined_ graphic rendition
attribute, which is enabled by the `SGR 21` escape sequence.
There was already an `ExtendedAttributes::DoublyUnderlined` flag in the
`TextAttribute` class, but I needed to add `SetDoublyUnderlined` and
`IsDoublyUnderlined` methods to access that flag, and update the
`SetGraphicsRendition` methods of the two dispatchers to set the
attribute on receipt of the `SGR 21` sequence. I also had to update the
existing `SGR 24` handler to reset _DoublyUnderlined_ in addition to
_Underlined_, since they share the same reset sequence.
For the rendering, I've added a new grid line type, which essentially
just draws an additional line with the same thickness as the regular
underline, but slightly below it - I found a gap of around 0.05 "em"
between the lines looked best. If there isn't enough space in the cell
for that gap, the second line will be clamped to overlap the first, so
you then just get a thicker line. If there isn't even enough space below
for a thicker line, we move the offset _above_ the first line, but just
enough to make it thicker.
The only other complication was the update of the `Xterm256Engine` in
the VT renderer. As mentioned above, the two underline attributes share
the same reset sequence, so to forward that state over conpty we require
a slightly more complicated process than with most other attributes
(similar to _Bold_ and _Faint_). We first check whether either underline
attribute needs to be turned off to send the reset sequence, and then
check individually if each of them needs to be turned back on again.
## Validation Steps Performed
For testing, I've extended the existing attribute tests in
`AdapterTest`, `VTRendererTest`, and `ScreenBufferTests`, to make sure
we're covering both the _Underlined_ and _DoublyUnderlined_ attributes.
I've also manually tested the `SGR 21` sequence in conhost and Windows
Terminal, with a variety of fonts and font sizes, to make sure the
rendering was reasonably distinguishable from a single underline.
Closes #2916
2020-08-10 19:06:16 +02:00
|
|
|
|
|
|
|
if (lines & GridLines::DoubleUnderline)
|
|
|
|
{
|
|
|
|
const auto y2 = target.y + _lineMetrics.underlineOffset2;
|
|
|
|
DrawLine(startX, y2, endX, y2, _lineMetrics.underlineWidth);
|
|
|
|
}
|
Refactor grid line renderers with support for more line types (#7107)
This is a refactoring of the grid line renderers, adjusting the line
widths to scale with the font size, and optimising the implementation to
cut down on the number of draw calls. It also extends the supported grid
line types to include true underlines and strike-through lines in the
style of the active font.
The main gist of the optimisation was to render the horizontal lines
with a single draw call, instead of a loop with lots of little strokes
joined together. In the case of the vertical lines, which still needed
to be handled in a loop, I've tried to move the majority of static
calculations outside the loop, so there is bit of optimisation there
too.
At the same time this code was updated to support a variable stroke
width for the lines, instead of having them hardcoded to 1 pixel. The
width is now calculated as a fraction of the font size (0.025 "em"),
which is still going to be 1 pixel wide in most typical usage, but will
scale up appropriately if you zoom in far enough.
And in preparation for supporting the SGR strike-through attribute, and
true underlines, I've extended the grid line renders with options for
handling those line types as well. The offset and thickness of the lines
is obtained from the font metrics (rounded to a pixel width, with a
minimum of one pixel), so they match the style of the font.
VALIDATION
For now we're still only rendering grid lines, and only the top and
bottom lines in the case of the DirectX renderer in Windows Terminal. So
to test, I hacked in some code to force the renderer to use all the
different options, confirming that they were working in both the GDI and
DirectX renderers.
I've tested the output with a number of different fonts, comparing it
with the same text rendered in WordPad. For the most part they match
exactly, but there can be slight differences when we adjust the font
size for grid alignment. And in the case of the GDI renderer, where
we're working with pixel heights rather than points, it's difficult to
match the sizes exactly.
This is a first step towards supporting the strike-through attribute
(#6205) and true underlines (#2915).
Closes #6911
2020-07-31 00:43:37 +02:00
|
|
|
}
|
2019-05-03 00:29:04 +02:00
|
|
|
|
Refactor grid line renderers with support for more line types (#7107)
This is a refactoring of the grid line renderers, adjusting the line
widths to scale with the font size, and optimising the implementation to
cut down on the number of draw calls. It also extends the supported grid
line types to include true underlines and strike-through lines in the
style of the active font.
The main gist of the optimisation was to render the horizontal lines
with a single draw call, instead of a loop with lots of little strokes
joined together. In the case of the vertical lines, which still needed
to be handled in a loop, I've tried to move the majority of static
calculations outside the loop, so there is bit of optimisation there
too.
At the same time this code was updated to support a variable stroke
width for the lines, instead of having them hardcoded to 1 pixel. The
width is now calculated as a fraction of the font size (0.025 "em"),
which is still going to be 1 pixel wide in most typical usage, but will
scale up appropriately if you zoom in far enough.
And in preparation for supporting the SGR strike-through attribute, and
true underlines, I've extended the grid line renders with options for
handling those line types as well. The offset and thickness of the lines
is obtained from the font metrics (rounded to a pixel width, with a
minimum of one pixel), so they match the style of the font.
VALIDATION
For now we're still only rendering grid lines, and only the top and
bottom lines in the case of the DirectX renderer in Windows Terminal. So
to test, I hacked in some code to force the renderer to use all the
different options, confirming that they were working in both the GDI and
DirectX renderers.
I've tested the output with a number of different fonts, comparing it
with the same text rendered in WordPad. For the most part they match
exactly, but there can be slight differences when we adjust the font
size for grid alignment. And in the case of the GDI renderer, where
we're working with pixel heights rather than points, it's difficult to
match the sizes exactly.
This is a first step towards supporting the strike-through attribute
(#6205) and true underlines (#2915).
Closes #6911
2020-07-31 00:43:37 +02:00
|
|
|
if (lines & GridLines::Strikethrough)
|
|
|
|
{
|
|
|
|
const auto halfStrikethroughWidth = _lineMetrics.strikethroughWidth / 2.0f;
|
|
|
|
const auto startX = target.x + halfStrikethroughWidth;
|
|
|
|
const auto endX = target.x + fullRunWidth - halfStrikethroughWidth;
|
|
|
|
const auto y = target.y + _lineMetrics.strikethroughOffset;
|
|
|
|
|
|
|
|
DrawLine(startX, y, endX, y, _lineMetrics.strikethroughWidth);
|
2019-05-03 00:29:04 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
return S_OK;
|
|
|
|
}
|
2020-04-13 22:09:02 +02:00
|
|
|
CATCH_RETURN()
|
2019-05-03 00:29:04 +02:00
|
|
|
|
|
|
|
// Routine Description:
|
|
|
|
// - Paints an overlay highlight on a portion of the frame to represent selected text
|
|
|
|
// Arguments:
|
|
|
|
// - rect - Rectangle to invert or highlight to make the selection area
|
|
|
|
// Return Value:
|
|
|
|
// - S_OK or relevant DirectX error.
|
2019-06-11 22:27:09 +02:00
|
|
|
[[nodiscard]] HRESULT DxEngine::PaintSelection(const SMALL_RECT rect) noexcept
|
2020-04-13 22:09:02 +02:00
|
|
|
try
|
2019-05-03 00:29:04 +02:00
|
|
|
{
|
2020-06-22 18:13:09 +02:00
|
|
|
// If a clip rectangle is in place from drawing the text layer, remove it here.
|
|
|
|
LOG_IF_FAILED(_customRenderer->EndClip(_drawingContext.get()));
|
|
|
|
|
2019-05-03 00:29:04 +02:00
|
|
|
const auto existingColor = _d2dBrushForeground->GetColor();
|
|
|
|
|
2019-11-13 19:17:39 +01:00
|
|
|
_d2dBrushForeground->SetColor(_selectionBackground);
|
2019-08-30 00:23:07 +02:00
|
|
|
const auto resetColorOnExit = wil::scope_exit([&]() noexcept { _d2dBrushForeground->SetColor(existingColor); });
|
2019-05-03 00:29:04 +02:00
|
|
|
|
2020-04-13 22:09:02 +02:00
|
|
|
const D2D1_RECT_F draw = til::rectangle{ Viewport::FromExclusive(rect).ToInclusive() }.scale_up(_glyphCell);
|
2019-05-03 00:29:04 +02:00
|
|
|
|
2020-06-20 00:14:01 +02:00
|
|
|
_d2dDeviceContext->FillRectangle(draw, _d2dBrushForeground.Get());
|
2019-05-03 00:29:04 +02:00
|
|
|
|
|
|
|
return S_OK;
|
|
|
|
}
|
2020-04-13 22:09:02 +02:00
|
|
|
CATCH_RETURN()
|
2019-05-03 00:29:04 +02:00
|
|
|
|
|
|
|
// Routine Description:
|
2020-06-04 14:58:22 +02:00
|
|
|
// - Does nothing. Our cursor is drawn in CustomTextRenderer::DrawGlyphRun,
|
|
|
|
// either above or below the text.
|
2019-05-03 00:29:04 +02:00
|
|
|
// Arguments:
|
2020-06-04 14:58:22 +02:00
|
|
|
// - options - unused
|
2019-05-03 00:29:04 +02:00
|
|
|
// Return Value:
|
2020-06-04 14:58:22 +02:00
|
|
|
// - S_OK
|
|
|
|
[[nodiscard]] HRESULT DxEngine::PaintCursor(const CursorOptions& /*options*/) noexcept
|
2019-05-03 00:29:04 +02:00
|
|
|
{
|
|
|
|
return S_OK;
|
|
|
|
}
|
|
|
|
|
2019-12-12 14:44:01 +01:00
|
|
|
// Routine Description:
|
|
|
|
// - Paint terminal effects.
|
|
|
|
// Arguments:
|
|
|
|
// Return Value:
|
|
|
|
// - S_OK or relevant DirectX error.
|
|
|
|
[[nodiscard]] HRESULT DxEngine::_PaintTerminalEffects() noexcept
|
|
|
|
try
|
|
|
|
{
|
|
|
|
// Should have been initialized.
|
|
|
|
RETURN_HR_IF(E_NOT_VALID_STATE, !_framebufferCapture);
|
|
|
|
|
|
|
|
// Capture current frame in swap chain to a texture.
|
|
|
|
::Microsoft::WRL::ComPtr<ID3D11Texture2D> swapBuffer;
|
|
|
|
RETURN_IF_FAILED(_dxgiSwapChain->GetBuffer(0, IID_PPV_ARGS(&swapBuffer)));
|
|
|
|
_d3dDeviceContext->CopyResource(_framebufferCapture.Get(), swapBuffer.Get());
|
|
|
|
|
|
|
|
// Prepare captured texture as input resource to shader program.
|
|
|
|
D3D11_TEXTURE2D_DESC desc;
|
|
|
|
_framebufferCapture->GetDesc(&desc);
|
|
|
|
|
|
|
|
D3D11_SHADER_RESOURCE_VIEW_DESC srvDesc;
|
|
|
|
srvDesc.ViewDimension = D3D11_SRV_DIMENSION_TEXTURE2D;
|
|
|
|
srvDesc.Texture2D.MostDetailedMip = 0;
|
|
|
|
srvDesc.Texture2D.MipLevels = desc.MipLevels;
|
|
|
|
srvDesc.Format = desc.Format;
|
|
|
|
|
|
|
|
::Microsoft::WRL::ComPtr<ID3D11ShaderResourceView> shaderResource;
|
|
|
|
RETURN_IF_FAILED(_d3dDevice->CreateShaderResourceView(_framebufferCapture.Get(), &srvDesc, &shaderResource));
|
|
|
|
|
|
|
|
// Render the screen quad with shader effects.
|
|
|
|
const UINT stride = sizeof(ShaderInput);
|
|
|
|
const UINT offset = 0;
|
|
|
|
|
|
|
|
_d3dDeviceContext->OMSetRenderTargets(1, _renderTargetView.GetAddressOf(), nullptr);
|
|
|
|
_d3dDeviceContext->IASetVertexBuffers(0, 1, _screenQuadVertexBuffer.GetAddressOf(), &stride, &offset);
|
|
|
|
_d3dDeviceContext->IASetPrimitiveTopology(D3D11_PRIMITIVE_TOPOLOGY_TRIANGLESTRIP);
|
|
|
|
_d3dDeviceContext->IASetInputLayout(_vertexLayout.Get());
|
|
|
|
_d3dDeviceContext->VSSetShader(_vertexShader.Get(), nullptr, 0);
|
|
|
|
_d3dDeviceContext->PSSetShader(_pixelShader.Get(), nullptr, 0);
|
|
|
|
_d3dDeviceContext->PSSetShaderResources(0, 1, shaderResource.GetAddressOf());
|
|
|
|
_d3dDeviceContext->PSSetSamplers(0, 1, _samplerState.GetAddressOf());
|
Scale retro terminal scan lines (#4716)
<!-- Enter a brief description/summary of your PR here. What does it fix/what does it change/how was it tested (even manually, if necessary)? -->
## Summary of the Pull Request
- Scale the retro terminal effects (#3468) scan lines with the screen's DPI.
- Remove artifacts from sampling wrap around.
Before & after, with my display scale set to 350%:
![Scaling scan lines](https://user-images.githubusercontent.com/38924837/75214566-df0f4780-5742-11ea-9bdc-3430eb24ccca.png)
Before & after showing artifact removal, with my display scale set to 100%, and image enlarged to 400%:
![Sampling artifacts annotated](https://user-images.githubusercontent.com/38924837/75214618-05cd7e00-5743-11ea-9060-f4eba257ea56.png)
<!-- Please review the items on the PR checklist before submitting-->
## PR Checklist
* [x] Closes #4362
* [x] CLA signed. If not, go over [here](https://cla.opensource.microsoft.com/microsoft/Terminal) and sign the CLA
* [ ] Tests added/passed
* [ ] Requires documentation to be updated
* [ ] I've discussed this with core contributors already. If not checked, I'm ready to accept this work might be rejected in favor of a different grand plan. Issue number where discussion took place: #xxx
<!-- Provide a more detailed description of the PR, other things fixed or any additional comments/features here -->
## Detailed Description of the Pull Request / Additional comments
Adds a constant buffer, which could be used for other settings for the retro terminal pixel shader.
I haven't touched C++ in over a decade before this change, and this is the first time I've played with DirectX, so please assume my code isn't exactly best practice. 🙂
<!-- Describe how you validated the behavior. Add automated tests wherever possible, but list manual validation steps taken as well -->
## Validation Steps Performed
- Changed display scale with experimental.retroTerminalEffect enabled, enjoyed scan lines on high resolution monitors.
- Enabled experimental.retroTerminalEffect, turned the setting off, changed display scale. Retro tabs still scale scan lines.
2020-02-26 01:08:45 +01:00
|
|
|
_d3dDeviceContext->PSSetConstantBuffers(0, 1, _pixelShaderSettingsBuffer.GetAddressOf());
|
2019-12-12 14:44:01 +01:00
|
|
|
_d3dDeviceContext->Draw(ARRAYSIZE(_screenQuadVertices), 0);
|
|
|
|
|
|
|
|
return S_OK;
|
|
|
|
}
|
|
|
|
CATCH_RETURN()
|
|
|
|
|
2019-05-03 00:29:04 +02:00
|
|
|
// Routine Description:
|
|
|
|
// - Updates the default brush colors used for drawing
|
|
|
|
// Arguments:
|
Improve the propagation of color attributes over ConPTY (#6506)
This PR reimplements the VT rendering engines to do a better job of
preserving the original color types when propagating attributes over
ConPTY. For the 16-color renderers it provides better support for
default colors and improves the efficiency of the color narrowing
conversions. It also fixes problems with the ordering of character
renditions that could result in attributes being dropped.
Originally the base renderer would calculate the RGB color values and
legacy/extended attributes up front, passing that data on to the active
engine's `UpdateDrawingBrushes` method. With this new implementation,
the renderer now just passes through the original `TextAttribute` along
with an `IRenderData` interface, and leaves it to the engines to extract
the information they need.
The GDI and DirectX engines now have to lookup the RGB colors themselves
(via simple `IRenderData` calls), but have no need for the other
attributes. The VT engines extract the information that they need from
the `TextAttribute`, instead of having to reverse engineer it from
`COLORREF`s.
The process for the 256-color Xterm engine starts with a check for
default colors. If both foreground and background are default, it
outputs a SGR 0 reset, and clears the `_lastTextAttribute` completely to
make sure any reset state is reapplied. With that out the way, the
foreground and background are updated (if changed) in one of 4 ways.
They can either be a default value (SGR 39 and 49), a 16-color index
(using ANSI or AIX sequences), a 256-color index, or a 24-bit RGB value
(both using SGR 38 and 48 sequences).
Then once the colors are accounted for, there is a separate step that
handles the character rendition attributes (bold, italics, underline,
etc.) This step must come _after_ the color sequences, in case a SGR
reset is required, which would otherwise have cleared any character
rendition attributes if it came last (which is what happened in the
original implementation).
The process for the 16-color engines is a little different. The target
client in this case (Windows telnet) is incapable of setting default
colors individually, so we need to output an SGR 0 reset if _either_
color has changed to default. With that out the way, we use the
`TextColor::GetLegacyIndex` method to obtain an approximate 16-color
index for each color, and apply the bold attribute by brightening the
foreground index (setting bit 8) if the color type permits that.
However, since Windows telnet only supports the 8 basic ANSI colors, the
best we can do for bright colors is to output an SGR 1 attribute to get
a bright foreground. There is nothing we can do about a bright
background, so after that we just have to drop the high bit from the
colors. If the resulting index values have changed from what they were
before, we then output ANSI 8-color SGR sequences to update them.
As with the 256-color engine, there is also a final step to handle the
character rendition attributes. But in this case, the only supported
attributes are underline and reversed video.
Since the VT engines no longer depend on the active color table and
default color values, there was quite a lot of code that could now be
removed. This included the `IDefaultColorProvider` interface and
implementations, the `Find(Nearest)TableIndex` functions, and also the
associated HLS conversion and difference calculations.
VALIDATION
Other than simple API parameter changes, the majority of updates
required in the unit tests were to correct assumptions about the way the
colors should be rendered, which were the source of the narrowing bugs
this PR was trying to fix. Like passing white on black to the
`UpdateDrawingBrushes` API, and expecting it to output the default `SGR
0` sequence, or passing an RGB color and expecting an indexed SGR
sequence.
In addition to that, I've added some VT renderer tests to make sure the
rendition attributes (bold, underline, etc) are correctly retained when
a default color update causes an `SGR 0` sequence to be generated (the
source of bug #3076). And I've extended the VT renderer color tests
(both 256-color and 16-color) to make sure we're covering all of the
different color types (default, RGB, and both forms of indexed colors).
I've also tried to manually verify that all of the test cases in the
linked bug reports (and their associated duplicates) are now fixed when
this PR is applied.
Closes #2661
Closes #3076
Closes #3717
Closes #5384
Closes #5864
This is only a partial fix for #293, but I suspect the remaining cases
are unfixable.
2020-07-01 20:10:36 +02:00
|
|
|
// - textAttributes - Text attributes to use for the brush color
|
|
|
|
// - pData - The interface to console data structures required for rendering
|
2019-05-03 00:29:04 +02:00
|
|
|
// - isSettingDefaultBrushes - Lets us know that these are the default brushes to paint the swapchain background or selection
|
|
|
|
// Return Value:
|
|
|
|
// - S_OK or relevant DirectX error.
|
Improve the propagation of color attributes over ConPTY (#6506)
This PR reimplements the VT rendering engines to do a better job of
preserving the original color types when propagating attributes over
ConPTY. For the 16-color renderers it provides better support for
default colors and improves the efficiency of the color narrowing
conversions. It also fixes problems with the ordering of character
renditions that could result in attributes being dropped.
Originally the base renderer would calculate the RGB color values and
legacy/extended attributes up front, passing that data on to the active
engine's `UpdateDrawingBrushes` method. With this new implementation,
the renderer now just passes through the original `TextAttribute` along
with an `IRenderData` interface, and leaves it to the engines to extract
the information they need.
The GDI and DirectX engines now have to lookup the RGB colors themselves
(via simple `IRenderData` calls), but have no need for the other
attributes. The VT engines extract the information that they need from
the `TextAttribute`, instead of having to reverse engineer it from
`COLORREF`s.
The process for the 256-color Xterm engine starts with a check for
default colors. If both foreground and background are default, it
outputs a SGR 0 reset, and clears the `_lastTextAttribute` completely to
make sure any reset state is reapplied. With that out of the way, the
foreground and background are updated (if changed) in one of 4 ways.
They can either be a default value (SGR 39 and 49), a 16-color index
(using ANSI or AIX sequences), a 256-color index, or a 24-bit RGB value
(both using SGR 38 and 48 sequences).
Then once the colors are accounted for, there is a separate step that
handles the character rendition attributes (bold, italics, underline,
etc.) This step must come _after_ the color sequences, in case a SGR
reset is required, which would otherwise have cleared any character
rendition attributes if it came last (which is what happened in the
original implementation).
The process for the 16-color engines is a little different. The target
client in this case (Windows telnet) is incapable of setting default
colors individually, so we need to output an SGR 0 reset if _either_
color has changed to default. With that out of the way, we use the
`TextColor::GetLegacyIndex` method to obtain an approximate 16-color
index for each color, and apply the bold attribute by brightening the
foreground index (setting bit 8) if the color type permits that.
However, since Windows telnet only supports the 8 basic ANSI colors, the
best we can do for bright colors is to output an SGR 1 attribute to get
a bright foreground. There is nothing we can do about a bright
background, so after that we just have to drop the high bit from the
colors. If the resulting index values have changed from what they were
before, we then output ANSI 8-color SGR sequences to update them.
As with the 256-color engine, there is also a final step to handle the
character rendition attributes. But in this case, the only supported
attributes are underline and reversed video.
Since the VT engines no longer depend on the active color table and
default color values, there was quite a lot of code that could now be
removed. This included the `IDefaultColorProvider` interface and
implementations, the `Find(Nearest)TableIndex` functions, and also the
associated HLS conversion and difference calculations.
VALIDATION
Other than simple API parameter changes, the majority of updates
required in the unit tests were to correct assumptions about the way the
colors should be rendered, which were the source of the narrowing bugs
this PR was trying to fix. Like passing white on black to the
`UpdateDrawingBrushes` API, and expecting it to output the default `SGR
0` sequence, or passing an RGB color and expecting an indexed SGR
sequence.
In addition to that, I've added some VT renderer tests to make sure the
rendition attributes (bold, underline, etc) are correctly retained when
a default color update causes an `SGR 0` sequence to be generated (the
source of bug #3076). And I've extended the VT renderer color tests
(both 256-color and 16-color) to make sure we're covering all of the
different color types (default, RGB, and both forms of indexed colors).
I've also tried to manually verify that all of the test cases in the
linked bug reports (and their associated duplicates) are now fixed when
this PR is applied.
Closes #2661
Closes #3076
Closes #3717
Closes #5384
Closes #5864
This is only a partial fix for #293, but I suspect the remaining cases
are unfixable.
2020-07-01 20:10:36 +02:00
|
|
|
// Routine Description:
// - Updates the foreground/background brushes used for drawing based on the
//   given text attributes, and optionally records them as the engine defaults.
// Arguments:
// - textAttributes - Text attributes to use for the brush color
// - pData - The interface to console data structures required for rendering
// - isSettingDefaultBrushes - Lets us know that these are the default brushes to paint the swapchain background or selection
// Return Value:
// - S_OK or relevant DirectX error.
[[nodiscard]] HRESULT DxEngine::UpdateDrawingBrushes(const TextAttribute& textAttributes,
                                                     const gsl::not_null<IRenderData*> pData,
                                                     const bool isSettingDefaultBrushes) noexcept
{
    // GH#5098: If we're rendering with cleartype text, we need to always render
    // onto an opaque background. If our background's opacity is 1.0f, that's
    // great, we can actually use cleartype in that case. In that scenario
    // (cleartype && opacity == 1.0), we'll force the opacity bits of the
    // COLORREF to 0xff so we draw as cleartype. In any other case, leave the
    // opacity bits unchanged. PaintBufferLine will later do some logic to
    // determine if we should paint the text as grayscale or not.
    const bool usingCleartype = _antialiasingMode == D2D1_TEXT_ANTIALIAS_MODE_CLEARTYPE;
    const bool usingTransparency = _defaultTextBackgroundOpacity != 1.0f;
    const bool forceOpaqueBG = usingCleartype && !usingTransparency;

    // Let the render data resolve the attribute into concrete RGB colors for
    // the foreground and background in a single lookup.
    const auto [colorForeground, colorBackground] = pData->GetAttributeColors(textAttributes);

    // Foreground is always drawn fully opaque; background opacity is forced
    // only in the cleartype-on-opaque case described above.
    _foregroundColor = _ColorFFromColorRef(OPACITY_OPAQUE | colorForeground);
    _backgroundColor = _ColorFFromColorRef((forceOpaqueBG ? OPACITY_OPAQUE : 0) | colorBackground);

    _d2dBrushForeground->SetColor(_foregroundColor);
    _d2dBrushBackground->SetColor(_backgroundColor);

    // If this flag is set, then we need to update the default brushes too and the swap chain background.
    if (isSettingDefaultBrushes)
    {
        _defaultForegroundColor = _foregroundColor;
        _defaultBackgroundColor = _backgroundColor;

        // If we have a swap chain, set the background color there too so the area
        // outside the chain on a resize can be filled in with an appropriate color value.
        /*if (_dxgiSwapChain)
        {
            const auto dxgiColor = s_RgbaFromColorF(_defaultBackgroundColor);
            RETURN_IF_FAILED(_dxgiSwapChain->SetBackgroundColor(&dxgiColor));
        }*/
    }

    // If we have a drawing context, it may be choosing its antialiasing based
    // on the colors. Update it if it exists.
    // We only need to do this here because this is called all the time on painting frames
    // and will update it in a timely fashion. Changing the AA mode or opacity do affect
    // it, but we will always hit updating the drawing brushes so we don't
    // need to update this in those locations.
    if (_drawingContext)
    {
        _drawingContext->forceGrayscaleAA = _ShouldForceGrayscaleAA();
    }

    return S_OK;
}
|
|
|
|
|
|
|
|
// Routine Description:
// - Updates the font used for drawing
// Arguments:
// - pfiFontInfoDesired - Information specifying the font that is requested
// - fiFontInfo - Filled with the nearest font actually chosen for drawing
// Return Value:
// - S_OK or relevant DirectX error
[[nodiscard]] HRESULT DxEngine::UpdateFont(const FontInfoDesired& pfiFontInfoDesired, FontInfo& fiFontInfo) noexcept
try
{
    // Resolve the requested font into the DirectWrite format, analyzer, face,
    // and line metrics we will draw with. Fills fiFontInfo with the actual
    // font chosen.
    RETURN_IF_FAILED(_GetProposedFont(pfiFontInfoDesired,
                                      fiFontInfo,
                                      _dpi,
                                      _dwriteTextFormat,
                                      _dwriteTextAnalyzer,
                                      _dwriteFontFace,
                                      _lineMetrics));

    // Cache the integer pixel dimensions of a single glyph cell for layout
    // and hit-testing computations.
    _glyphCell = fiFontInfo.GetSize();

    // Calculate and cache the box effect for the base font. Scale is 1.0f because the base font is exactly the scale we want already.
    RETURN_IF_FAILED(CustomTextLayout::s_CalculateBoxEffect(_dwriteTextFormat.Get(), _glyphCell.width(), _dwriteFontFace.Get(), 1.0f, &_boxDrawingEffect));

    // Prepare the text layout
    _customLayout = WRL::Make<CustomTextLayout>(_dwriteFactory.Get(), _dwriteTextAnalyzer.Get(), _dwriteTextFormat.Get(), _dwriteFontFace.Get(), _glyphCell.width(), _boxDrawingEffect.Get());

    return S_OK;
}
CATCH_RETURN();
|
2019-05-03 00:29:04 +02:00
|
|
|
|
2019-06-11 22:27:09 +02:00
|
|
|
// Routine Description:
// - Converts a viewport measured in pixels into an equivalent viewport
//   measured in character cells, using the cached glyph cell dimensions.
// Arguments:
// - viewInPixels - Viewport expressed in pixel coordinates.
// Return Value:
// - A viewport with the same origin, sized in whole character cells.
[[nodiscard]] Viewport DxEngine::GetViewportInCharacters(const Viewport& viewInPixels) noexcept
{
    // Integer division truncates, so any partial trailing cell is discarded.
    const auto charsWide = gsl::narrow_cast<short>(viewInPixels.Width() / _glyphCell.width());
    const auto charsTall = gsl::narrow_cast<short>(viewInPixels.Height() / _glyphCell.height());
    return Viewport::FromDimensions(viewInPixels.Origin(), { charsWide, charsTall });
}
|
|
|
|
|
|
|
|
// Routine Description:
// - Sets the DPI in this renderer
// Arguments:
// - iDpi - DPI
// Return Value:
// - S_OK
[[nodiscard]] HRESULT DxEngine::UpdateDpi(int const iDpi) noexcept
{
    _dpi = iDpi;

    // The scale factor may be necessary for composition contexts, so save it once here.
    _scale = _dpi / static_cast<float>(USER_DEFAULT_SCREEN_DPI);

    // Everything on screen is sized relative to DPI, so the whole frame must
    // be redrawn.
    RETURN_IF_FAILED(InvalidateAll());

    // The retro terminal effect scales its scan lines with DPI, so the pixel
    // shader settings must be recomputed and re-uploaded to the GPU here.
    if (_retroTerminalEffects && _d3dDeviceContext && _pixelShaderSettingsBuffer)
    {
        _ComputePixelShaderSettings();
        try
        {
            _d3dDeviceContext->UpdateSubresource(_pixelShaderSettingsBuffer.Get(), 0, nullptr, &_pixelShaderSettings, 0, 0);
        }
        CATCH_RETURN();
    }

    return S_OK;
}
|
|
|
|
|
|
|
|
// Method Description:
|
|
|
|
// - Get the current scale factor of this renderer. The actual DPI the renderer
|
|
|
|
// is USER_DEFAULT_SCREEN_DPI * GetScaling()
|
|
|
|
// Arguments:
|
|
|
|
// - <none>
|
|
|
|
// Return Value:
|
|
|
|
// - the scaling multiplier of this render engine
|
|
|
|
float DxEngine::GetScaling() const noexcept
|
|
|
|
{
|
|
|
|
return _scale;
|
|
|
|
}
|
|
|
|
|
|
|
|
// Method Description:
|
|
|
|
// - This method will update our internal reference for how big the viewport is.
|
|
|
|
// Does nothing for DX.
|
|
|
|
// Arguments:
|
|
|
|
// - srNewViewport - The bounds of the new viewport.
|
|
|
|
// Return Value:
|
|
|
|
// - HRESULT S_OK
|
2019-06-11 22:27:09 +02:00
|
|
|
[[nodiscard]] HRESULT DxEngine::UpdateViewport(const SMALL_RECT /*srNewViewport*/) noexcept
|
2019-05-03 00:29:04 +02:00
|
|
|
{
|
|
|
|
return S_OK;
|
|
|
|
}
|
|
|
|
|
|
|
|
// Routine Description:
|
|
|
|
// - Currently unused by this renderer
|
|
|
|
// Arguments:
|
|
|
|
// - pfiFontInfoDesired - <unused>
|
|
|
|
// - pfiFontInfo - <unused>
|
|
|
|
// - iDpi - <unused>
|
|
|
|
// Return Value:
|
|
|
|
// - S_OK
|
2019-06-11 22:27:09 +02:00
|
|
|
// Routine Description:
// - Resolves what font would be used for the given request/DPI without
//   committing anything to this engine's state. Only the FontInfo
//   out-parameter is of interest to the caller; the DirectWrite objects
//   produced along the way are discarded when this call returns.
// Arguments:
// - pfiFontInfoDesired - the requested font characteristics
// - pfiFontInfo - receives the font that would actually be used
// - iDpi - the DPI at which to evaluate the font
// Return Value:
// - S_OK or relevant DirectWrite error.
[[nodiscard]] HRESULT DxEngine::GetProposedFont(const FontInfoDesired& pfiFontInfoDesired,
                                               FontInfo& pfiFontInfo,
                                               int const iDpi) noexcept
{
    // Throwaway receptacles: _GetProposedFont fills these as by-products of
    // resolving the font, but this query-only entry point has no use for them.
    Microsoft::WRL::ComPtr<IDWriteTextFormat> discardedFormat;
    Microsoft::WRL::ComPtr<IDWriteTextAnalyzer1> discardedAnalyzer;
    Microsoft::WRL::ComPtr<IDWriteFontFace1> discardedFace;
    LineMetrics discardedLineMetrics;

    return _GetProposedFont(pfiFontInfoDesired,
                            pfiFontInfo,
                            iDpi,
                            discardedFormat,
                            discardedAnalyzer,
                            discardedFace,
                            discardedLineMetrics);
}
|
|
|
|
|
|
|
|
// Routine Description:
|
|
|
|
// - Gets the area that we currently believe is dirty within the character cell grid
|
|
|
|
// Arguments:
|
|
|
|
// - <none>
|
|
|
|
// Return Value:
|
|
|
|
// - Rectangle describing dirty area in characters.
|
Move ConPTY to use til::bitmap (#5024)
## Summary of the Pull Request
Moves the ConPTY drawing mechanism (`VtRenderer`) to use the fine-grained `til::bitmap` individual-dirty-bit tracking mechanism instead of coarse-grained rectangle unions to improve drawing performance by dramatically reducing the total area redrawn.
## PR Checklist
* [x] Part of #778 and #1064
* [x] I work here
* [x] Tests added and updated.
* [x] I'm a core contributor
## Detailed Description of the Pull Request / Additional comments
- Converted `GetDirtyArea()` interface from `IRenderEngine` to use a vector of `til::rectangle` instead of the `SMALL_RECT` to banhammer inclusive rectangles.
- `VtEngine` now holds and operates on the `til::bitmap` for invalidation regions. All invalidation operation functions that used to be embedded inside `VtEngine` are deleted in favor of using the ones in `til::bitmap`.
- Updated `VtEngine` tracing to use new `til::bitmap` on trace and the new `to_string()` methods detailed below.
- Comparison operators for `til::bitmap` and complementary tests.
- Fixed an issue where the dirty rectangle shortcut in `til::bitmap` was set to 0,0,0,0 by default which means that `|=` on it with each `set()` operation was stretching the rectangle from 0,0. Now it's a `std::optional` so it has no value after just being cleared and will build from whatever the first invalidated rectangle is. Complementary tests added.
- Optional run caching for `til::bitmap` in the `runs()` method since both VT and DX renderers will likely want to generate the set of runs at the beginning of a frame and refer to them over and over through that frame. Saves the iteration and creation and caches inside `til::bitmap` where the chance of invalidation of the underlying data is known best. It is still possible to iterate manually with `begin()` and `end()` from the outside without caching, if desired. Complementary tests added.
- WEX templates added for `til::bitmap` and used in tests.
- `translate()` method for `til::bitmap` which will slide the dirty points in the direction specified by a `til::point` and optionally back-fill the uncovered area as dirty. Complementary tests added.
- Moves all string generation for `til` types `size`, `point`, `rectangle`, and `some` into a `to_string` method on each object such that it can be used in both ETW tracing scenarios AND in the TAEF templates uniformly. Adds a similar method for `bitmap`.
- Add tagging to `_bitmap_const_iterator` such that it appears as a valid **Input Iterator** to STL collections and can be used in a `std::vector` constructor as a range. Adds and cleans up operators on this iterator to match the theoretical requirements for an **Input Iterator**. Complementary tests added.
- Add loose operators to `til` which will allow some basic math operations (+, -, *, /) between `til::size` and `til::point` and vice versa. Complementary tests added. Complementary tests added.
- Adds operators to `til::rectangle` to allow scaling with basic math operations (+, -, *) versus `til::size` and translation with basic math operations (+, -) against `til::point`. Complementary tests added.
- In-place variants of some operations added to assorted `til` objects. Complementary tests added.
- Update VT tests to compare invalidation against the new map structure instead of raw rectangles where possible.
## Validation Steps Performed
- Wrote additional til Unit Tests for all additional operators and functions added to the project to support this operation
- Updated the existing VT renderer tests
- Ran perf check
2020-03-23 16:57:54 +01:00
|
|
|
// Routine Description:
// - Gets the area that we currently believe is dirty within the character cell grid
// Arguments:
// - <none>
// Return Value:
// - Rectangles (in character cells) describing the dirty areas.
[[nodiscard]] std::vector<til::rectangle> DxEngine::GetDirtyArea()
{
    // Each run is a rectangle of contiguous invalidated cells pulled from
    // the fine-grained invalidation bitmap.
    return _invalidMap.runs();
}
|
|
|
|
|
|
|
|
// Routine Description:
|
|
|
|
// - Gets the current font size
|
|
|
|
// Arguments:
|
|
|
|
// - pFontSize - Filled with the font size.
|
|
|
|
// Return Value:
|
|
|
|
// - S_OK
|
2019-06-11 22:27:09 +02:00
|
|
|
// Routine Description:
// - Gets the current font size
// Arguments:
// - pFontSize - Filled with the font size.
// Return Value:
// - S_OK, or a caught exception converted to an HRESULT.
[[nodiscard]] HRESULT DxEngine::GetFontSize(_Out_ COORD* const pFontSize) noexcept
try
{
    // _glyphCell holds the pixel dimensions of a single character cell
    // (assumed assignable/convertible to COORD here).
    *pFontSize = _glyphCell;
    return S_OK;
}
CATCH_RETURN();
|
2019-05-03 00:29:04 +02:00
|
|
|
|
|
|
|
// Routine Description:
|
|
|
|
// - Currently unused by this renderer.
|
|
|
|
// Arguments:
|
Merged PR 3215853: Fix spacing/layout for block characters and many retroactively-recategorized emoji (and more!)
This encompasses a handful of problems with column counting.
The Terminal project didn't set a fallback column counter. Oops. I've fixed this to use the `DxEngine` as the fallback.
The `DxEngine` didn't implement its fallback method. Oops. I've fixed this to use the `CustomTextLayout` to figure out the advances based on the same font and fallback pattern as the real final layout, just without "rounding" it into cells yet.
- `CustomTextLayout` has been updated to move the advance-correction into a separate phase from glyph shaping. Previously, we corrected the advances to nice round cell counts during shaping, which is fine for drawing, but hard for column count analysis.
- Now that there are separate phases, an `Analyze` method was added to the `CustomTextLayout` which just performs the text analysis steps and the glyph shaping, but no advance correction to column boundaries nor actual drawing.
I've taken the caching code that I was working on to improve chafa, and I've brought it into this. Now that we're doing a lot of fallback and heavy lifting in terms of analysis via the layout, we should cache the results until the font changes.
I've adjusted how column counting is done overall. It's always been in these phases:
1. We used a quick-lookup of ranges of characters we knew to rapidly decide `Narrow`, `Wide` or `Invalid` (a.k.a. "I dunno")
2. If it was `Invalid`, we consulted a table based off of the Unicode standard that has either `Narrow`, `Wide`, or `Ambiguous` as a result.
3. If it's still `Ambiguous`, we consult a render engine fallback (usually GDI or now DX) to see how many columns it would take.
4. If we still don't know, then it's `Wide` to be safe.
- I've added an additional flow here. The quick-lookup can now return `Ambiguous` off the bat for some glyph characters in the x2000-x3000 range that used to just be simple shapes but have been retroactively recategorized as emoji and are frequently now using full width color glyphs.
- This new state causes the lookup to go immediately to the render engine if it is available instead of consulting the Unicode standard table first because the half/fullwidth table doesn't appear to have been updated for this nuance to reclass these characters as ambiguous, but we'd like to keep that table as a "generated from the spec" sort of table and keep our exceptions in the "quick lookup" function.
I have confirmed the following things "just work" now:
- The windows logo flag from the demo. (⚫⚪💖✅🌌😊)
- The dotted chart on the side of crossterm demo (•)
- The powerline characters that make arrows with the Consolas patched font (██)
- An accented é
- The warning and checkmark symbols appearing same size as the X. (✔⚠🔥)
Related work items: #21167256, #21237515, #21243859, #21274645, #21296827
2019-05-02 01:13:53 +02:00
|
|
|
// - glyph - The glyph run to process for column width.
|
|
|
|
// - pResult - True if it should take two columns. False if it should take one.
|
2019-05-03 00:29:04 +02:00
|
|
|
// Return Value:
|
Merged PR 3215853: Fix spacing/layout for block characters and many retroactively-recategorized emoji (and more!)
This encompasses a handful of problems with column counting.
The Terminal project didn't set a fallback column counter. Oops. I've fixed this to use the `DxEngine` as the fallback.
The `DxEngine` didn't implement its fallback method. Oops. I've fixed this to use the `CustomTextLayout` to figure out the advances based on the same font and fallback pattern as the real final layout, just without "rounding" it into cells yet.
- `CustomTextLayout` has been updated to move the advance-correction into a separate phase from glyph shaping. Previously, we corrected the advances to nice round cell counts during shaping, which is fine for drawing, but hard for column count analysis.
- Now that there are separate phases, an `Analyze` method was added to the `CustomTextLayout` which just performs the text analysis steps and the glyph shaping, but no advance correction to column boundaries nor actual drawing.
I've taken the caching code that I was working on to improve chafa, and I've brought it into this. Now that we're doing a lot of fallback and heavy lifting in terms of analysis via the layout, we should cache the results until the font changes.
I've adjusted how column counting is done overall. It's always been in these phases:
1. We used a quick-lookup of ranges of characters we knew to rapidly decide `Narrow`, `Wide` or `Invalid` (a.k.a. "I dunno")
2. If it was `Invalid`, we consulted a table based off of the Unicode standard that has either `Narrow`, `Wide`, or `Ambiguous` as a result.
3. If it's still `Ambiguous`, we consult a render engine fallback (usually GDI or now DX) to see how many columns it would take.
4. If we still don't know, then it's `Wide` to be safe.
- I've added an additional flow here. The quick-lookup can now return `Ambiguous` off the bat for some glyph characters in the x2000-x3000 range that used to just be simple shapes but have been retroactively recategorized as emoji and are frequently now using full width color glyphs.
- This new state causes the lookup to go immediately to the render engine if it is available instead of consulting the Unicode standard table first because the half/fullwidth table doesn't appear to have been updated for this nuance to reclass these characters as ambiguous, but we'd like to keep that table as a "generated from the spec" sort of table and keep our exceptions in the "quick lookup" function.
I have confirmed the following things "just work" now:
- The windows logo flag from the demo. (⚫⚪💖✅🌌😊)
- The dotted chart on the side of crossterm demo (•)
- The powerline characters that make arrows with the Consolas patched font (██)
- An accented é
- The warning and checkmark symbols appearing same size as the X. (✔⚠🔥)
Related work items: #21167256, #21237515, #21243859, #21274645, #21296827
2019-05-02 01:13:53 +02:00
|
|
|
// - S_OK or relevant DirectWrite error.
|
2019-06-11 22:27:09 +02:00
|
|
|
[[nodiscard]] HRESULT DxEngine::IsGlyphWideByFont(const std::wstring_view glyph, _Out_ bool* const pResult) noexcept
|
2020-04-13 22:09:02 +02:00
|
|
|
try
|
2019-05-03 00:29:04 +02:00
|
|
|
{
|
2019-09-03 17:46:24 +02:00
|
|
|
RETURN_HR_IF_NULL(E_INVALIDARG, pResult);
|
|
|
|
|
2020-04-13 22:09:02 +02:00
|
|
|
const Cluster cluster(glyph, 0); // columns don't matter, we're doing analysis not layout.
|
Merged PR 3215853: Fix spacing/layout for block characters and many retroactively-recategorized emoji (and more!)
This encompasses a handful of problems with column counting.
The Terminal project didn't set a fallback column counter. Oops. I've fixed this to use the `DxEngine` as the fallback.
The `DxEngine` didn't implement its fallback method. Oops. I've fixed this to use the `CustomTextLayout` to figure out the advances based on the same font and fallback pattern as the real final layout, just without "rounding" it into cells yet.
- `CustomTextLayout` has been updated to move the advance-correction into a separate phase from glyph shaping. Previously, we corrected the advances to nice round cell counts during shaping, which is fine for drawing, but hard for column count analysis.
- Now that there are separate phases, an `Analyze` method was added to the `CustomTextLayout` which just performs the text analysis steps and the glyph shaping, but no advance correction to column boundaries nor actual drawing.
I've taken the caching code that I was working on to improve chafa, and I've brought it into this. Now that we're doing a lot of fallback and heavy lifting in terms of analysis via the layout, we should cache the results until the font changes.
I've adjusted how column counting is done overall. It's always been in these phases:
1. We used a quick-lookup of ranges of characters we knew to rapidly decide `Narrow`, `Wide` or `Invalid` (a.k.a. "I dunno")
2. If it was `Invalid`, we consulted a table based off of the Unicode standard that has either `Narrow`, `Wide`, or `Ambiguous` as a result.
3. If it's still `Ambiguous`, we consult a render engine fallback (usually GDI or now DX) to see how many columns it would take.
4. If we still don't know, then it's `Wide` to be safe.
- I've added an additional flow here. The quick-lookup can now return `Ambiguous` off the bat for some glyph characters in the x2000-x3000 range that used to just be simple shapes but have been retroactively recategorized as emoji and are frequently now using full width color glyphs.
- This new state causes the lookup to go immediately to the render engine if it is available instead of consulting the Unicode standard table first because the half/fullwidth table doesn't appear to have been updated for this nuance to reclass these characters as ambiguous, but we'd like to keep that table as a "generated from the spec" sort of table and keep our exceptions in the "quick lookup" function.
I have confirmed the following things "just work" now:
- The windows logo flag from the demo. (⚫⚪💖✅🌌😊)
- The dotted chart on the side of crossterm demo (•)
- The powerline characters that make arrows with the Consolas patched font (██)
- An accented é
- The warning and checkmark symbols appearing same size as the X. (✔⚠🔥)
Related work items: #21167256, #21237515, #21243859, #21274645, #21296827
2019-05-02 01:13:53 +02:00
|
|
|
|
2020-06-22 18:13:09 +02:00
|
|
|
RETURN_IF_FAILED(_customLayout->Reset());
|
|
|
|
RETURN_IF_FAILED(_customLayout->AppendClusters({ &cluster, 1 }));
|
Merged PR 3215853: Fix spacing/layout for block characters and many retroactively-recategorized emoji (and more!)
This encompasses a handful of problems with column counting.
The Terminal project didn't set a fallback column counter. Oops. I've fixed this to use the `DxEngine` as the fallback.
The `DxEngine` didn't implement its fallback method. Oops. I've fixed this to use the `CustomTextLayout` to figure out the advances based on the same font and fallback pattern as the real final layout, just without "rounding" it into cells yet.
- `CustomTextLayout` has been updated to move the advance-correction into a separate phase from glyph shaping. Previously, we corrected the advances to nice round cell counts during shaping, which is fine for drawing, but hard for column count analysis.
- Now that there are separate phases, an `Analyze` method was added to the `CustomTextLayout` which just performs the text analysis steps and the glyph shaping, but no advance correction to column boundaries nor actual drawing.
I've taken the caching code that I was working on to improve chafa, and I've brought it into this. Now that we're doing a lot of fallback and heavy lifting in terms of analysis via the layout, we should cache the results until the font changes.
I've adjusted how column counting is done overall. It's always been in these phases:
1. We used a quick-lookup of ranges of characters we knew to rapidly decide `Narrow`, `Wide` or `Invalid` (a.k.a. "I dunno")
2. If it was `Invalid`, we consulted a table based off of the Unicode standard that has either `Narrow`, `Wide`, or `Ambiguous` as a result.
3. If it's still `Ambiguous`, we consult a render engine fallback (usually GDI or now DX) to see how many columns it would take.
4. If we still don't know, then it's `Wide` to be safe.
- I've added an additional flow here. The quick-lookup can now return `Ambiguous` off the bat for some glyph characters in the x2000-x3000 range that used to just be simple shapes but have been retroactively recategorized as emoji and are frequently now using full width color glyphs.
- This new state causes the lookup to go immediately to the render engine if it is available instead of consulting the Unicode standard table first because the half/fullwidth table doesn't appear to have been updated for this nuance to reclass these characters as ambiguous, but we'd like to keep that table as a "generated from the spec" sort of table and keep our exceptions in the "quick lookup" function.
I have confirmed the following things "just work" now:
- The windows logo flag from the demo. (⚫⚪💖✅🌌😊)
- The dotted chart on the side of crossterm demo (•)
- The powerline characters that make arrows with the Consolas patched font (██)
- An accented é
- The warning and checkmark symbols appearing same size as the X. (✔⚠🔥)
Related work items: #21167256, #21237515, #21243859, #21274645, #21296827
2019-05-02 01:13:53 +02:00
|
|
|
|
2020-04-13 22:09:02 +02:00
|
|
|
UINT32 columns = 0;
|
2020-06-22 18:13:09 +02:00
|
|
|
RETURN_IF_FAILED(_customLayout->GetColumns(&columns));
|
Merged PR 3215853: Fix spacing/layout for block characters and many retroactively-recategorized emoji (and more!)
This encompasses a handful of problems with column counting.
The Terminal project didn't set a fallback column counter. Oops. I've fixed this to use the `DxEngine` as the fallback.
The `DxEngine` didn't implement its fallback method. Oops. I've fixed this to use the `CustomTextLayout` to figure out the advances based on the same font and fallback pattern as the real final layout, just without "rounding" it into cells yet.
- `CustomTextLayout` has been updated to move the advance-correction into a separate phase from glyph shaping. Previously, we corrected the advances to nice round cell counts during shaping, which is fine for drawing, but hard for column count analysis.
- Now that there are separate phases, an `Analyze` method was added to the `CustomTextLayout` which just performs the text analysis steps and the glyph shaping, but no advance correction to column boundaries nor actual drawing.
I've taken the caching code that I was working on to improve chafa, and I've brought it into this. Now that we're doing a lot of fallback and heavy lifting in terms of analysis via the layout, we should cache the results until the font changes.
I've adjusted how column counting is done overall. It's always been in these phases:
1. We used a quick-lookup of ranges of characters we knew to rapidly decide `Narrow`, `Wide` or `Invalid` (a.k.a. "I dunno")
2. If it was `Invalid`, we consulted a table based off of the Unicode standard that has either `Narrow`, `Wide`, or `Ambiguous` as a result.
3. If it's still `Ambiguous`, we consult a render engine fallback (usually GDI or now DX) to see how many columns it would take.
4. If we still don't know, then it's `Wide` to be safe.
- I've added an additional flow here. The quick-lookup can now return `Ambiguous` off the bat for some glyph characters in the x2000-x3000 range that used to just be simple shapes but have been retroactively recategorized as emoji and are frequently now using full width color glyphs.
- This new state causes the lookup to go immediately to the render engine if it is available instead of consulting the Unicode standard table first because the half/fullwidth table doesn't appear to have been updated for this nuance to reclass these characters as ambiguous, but we'd like to keep that table as a "generated from the spec" sort of table and keep our exceptions in the "quick lookup" function.
I have confirmed the following things "just work" now:
- The windows logo flag from the demo. (⚫⚪💖✅🌌😊)
- The dotted chart on the side of crossterm demo (•)
- The powerline characters that make arrows with the Consolas patched font (██)
- An accented é
- The warning and checkmark symbols appearing same size as the X. (✔⚠🔥)
Related work items: #21167256, #21237515, #21243859, #21274645, #21296827
2019-05-02 01:13:53 +02:00
|
|
|
|
2020-04-13 22:09:02 +02:00
|
|
|
*pResult = columns != 1;
|
2019-06-11 22:27:09 +02:00
|
|
|
|
2019-05-03 00:29:04 +02:00
|
|
|
return S_OK;
|
|
|
|
}
|
2020-04-13 22:09:02 +02:00
|
|
|
CATCH_RETURN();
|
2019-05-03 00:29:04 +02:00
|
|
|
|
|
|
|
// Method Description:
|
|
|
|
// - Updates the window's title string.
|
|
|
|
// Arguments:
|
|
|
|
// - newTitle: the new string to use for the title of the window
|
|
|
|
// Return Value:
|
|
|
|
// - S_OK
|
2019-06-11 22:27:09 +02:00
|
|
|
// Method Description:
// - Signals the hosting window that the title changed by posting
//   CM_UPDATE_TITLE; the window is expected to fetch the new title itself.
// Arguments:
// - newTitle: unused here (see above).
// Return Value:
// - S_OK if the message was posted, E_FAIL if posting failed,
//   S_FALSE when there is no window handle to notify.
[[nodiscard]] HRESULT DxEngine::_DoUpdateTitle(_In_ const std::wstring& /*newTitle*/) noexcept
{
    // Without a valid window target there is nobody to tell.
    if (_hwndTarget == INVALID_HANDLE_VALUE)
    {
        return S_FALSE;
    }

    return PostMessageW(_hwndTarget, CM_UPDATE_TITLE, 0, 0) ? S_OK : E_FAIL;
}
|
|
|
|
|
2019-07-30 23:32:23 +02:00
|
|
|
// Routine Description:
|
|
|
|
// - Attempts to locate the font given, but then begins falling back if we cannot find it.
|
|
|
|
// - We'll try to fall back to Consolas with the given weight/stretch/style first,
|
|
|
|
// then try Consolas again with normal weight/stretch/style,
|
|
|
|
// and if nothing works, then we'll throw an error.
|
|
|
|
// Arguments:
|
|
|
|
// - familyName - The font name we should be looking for
|
|
|
|
// - weight - The weight (bold, light, etc.)
|
|
|
|
// - stretch - The stretch of the font is the spacing between each letter
|
|
|
|
// - style - Normal, italic, etc.
|
|
|
|
// Return Value:
|
|
|
|
// - Smart pointer holding interface reference for queryable font data.
|
|
|
|
// Routine Description:
// - Attempts to locate the font given, but then begins falling back if we cannot find it.
// - Each entry in FALLBACK_FONT_FACES is tried twice: first with the requested
//   weight/stretch/style, then with everything normalized. If nothing works,
//   we throw.
// Arguments:
// - familyName - The font name we should be looking for; rewritten in place to
//   whatever family was actually resolved.
// - weight - The weight (bold, light, etc.); may be normalized in place.
// - stretch - The stretch of the font; may be normalized in place.
// - style - Normal, italic, etc.; may be normalized in place.
// - localeName - Locale used for name lookups; may be updated by _FindFontFace.
// Return Value:
// - Smart pointer holding interface reference for queryable font data.
[[nodiscard]] Microsoft::WRL::ComPtr<IDWriteFontFace1> DxEngine::_ResolveFontFaceWithFallback(std::wstring& familyName,
                                                                                              DWRITE_FONT_WEIGHT& weight,
                                                                                              DWRITE_FONT_STRETCH& stretch,
                                                                                              DWRITE_FONT_STYLE& style,
                                                                                              std::wstring& localeName) const
{
    // First attempt: exactly what the caller asked for.
    auto face = _FindFontFace(familyName, weight, stretch, style, localeName);

    if (!face)
    {
        for (const auto fallbackFace : FALLBACK_FONT_FACES)
        {
            // Try the fallback family with the (possibly already normalized)
            // weight/stretch/style currently in effect.
            familyName = fallbackFace;
            face = _FindFontFace(familyName, weight, stretch, style, localeName);

            if (face)
            {
                break;
            }

            // Still nothing: normalize weight/stretch/style and retry the same
            // family. NOTE: once normalized, these stay normalized for any
            // subsequent fallback attempts (and for the caller) by design.
            familyName = fallbackFace;
            weight = DWRITE_FONT_WEIGHT_NORMAL;
            stretch = DWRITE_FONT_STRETCH_NORMAL;
            style = DWRITE_FONT_STYLE_NORMAL;
            face = _FindFontFace(familyName, weight, stretch, style, localeName);

            if (face)
            {
                break;
            }
        }
    }

    // Ran out of options entirely; surface a hard failure.
    THROW_HR_IF_NULL(E_FAIL, face);

    return face;
}
|
|
|
|
|
2019-05-03 00:29:04 +02:00
|
|
|
// Routine Description:
|
|
|
|
// - Locates a suitable font face from the given information
|
|
|
|
// Arguments:
|
|
|
|
// - familyName - The font name we should be looking for
|
|
|
|
// - weight - The weight (bold, light, etc.)
|
|
|
|
// - stretch - The stretch of the font is the spacing between each letter
|
|
|
|
// - style - Normal, italic, etc.
|
|
|
|
// Return Value:
|
|
|
|
// - Smart pointer holding interface reference for queryable font data.
|
2019-07-30 23:32:23 +02:00
|
|
|
// Routine Description:
// - Locates a suitable font face from the given information
// Arguments:
// - familyName - The font name we should be looking for; on success, rewritten
//   in place to the localized name of the family that was actually found.
// - weight - The weight (bold, light, etc.); updated to the matched font's weight.
// - stretch - The stretch of the font; updated to the matched font's stretch.
// - style - Normal, italic, etc.; updated to the matched font's style.
// - localeName - Locale used to dig out the family name; may be updated by
//   _GetFontFamilyName if a fallback locale had to be used.
// Return Value:
// - Smart pointer holding interface reference for queryable font data, or an
//   empty pointer when the family doesn't exist in the system collection.
[[nodiscard]] Microsoft::WRL::ComPtr<IDWriteFontFace1> DxEngine::_FindFontFace(std::wstring& familyName,
                                                                               DWRITE_FONT_WEIGHT& weight,
                                                                               DWRITE_FONT_STRETCH& stretch,
                                                                               DWRITE_FONT_STYLE& style,
                                                                               std::wstring& localeName) const
{
    Microsoft::WRL::ComPtr<IDWriteFontFace1> fontFace;

    Microsoft::WRL::ComPtr<IDWriteFontCollection> fontCollection;
    THROW_IF_FAILED(_dwriteFactory->GetSystemFontCollection(&fontCollection, false));

    // Initialize the out-parameters of FindFamilyName so we never branch on
    // indeterminate stack values (previously these were left uninitialized).
    UINT32 familyIndex = 0;
    BOOL familyExists = FALSE;
    THROW_IF_FAILED(fontCollection->FindFamilyName(familyName.data(), &familyIndex, &familyExists));

    if (familyExists)
    {
        Microsoft::WRL::ComPtr<IDWriteFontFamily> fontFamily;
        THROW_IF_FAILED(fontCollection->GetFontFamily(familyIndex, &fontFamily));

        Microsoft::WRL::ComPtr<IDWriteFont> font;
        THROW_IF_FAILED(fontFamily->GetFirstMatchingFont(weight, stretch, style, &font));

        Microsoft::WRL::ComPtr<IDWriteFontFace> fontFace0;
        THROW_IF_FAILED(font->CreateFontFace(&fontFace0));

        THROW_IF_FAILED(fontFace0.As(&fontFace));

        // Retrieve metrics in case the font we created was different than what was requested.
        weight = font->GetWeight();
        stretch = font->GetStretch();
        style = font->GetStyle();

        // Dig the family name out at the end to return it.
        familyName = _GetFontFamilyName(fontFamily.Get(), localeName);
    }

    return fontFace;
}
|
|
|
|
|
2019-07-30 23:32:23 +02:00
|
|
|
// Routine Description:
|
|
|
|
// - Helper to retrieve the user's locale preference or fallback to the default.
|
|
|
|
// Arguments:
|
|
|
|
// - <none>
|
|
|
|
// Return Value:
|
|
|
|
// - A locale that can be used on construction of assorted DX objects that want to know one.
|
|
|
|
[[nodiscard]] std::wstring DxEngine::_GetLocaleName() const
|
|
|
|
{
|
|
|
|
std::array<wchar_t, LOCALE_NAME_MAX_LENGTH> localeName;
|
|
|
|
|
|
|
|
const auto returnCode = GetUserDefaultLocaleName(localeName.data(), gsl::narrow<int>(localeName.size()));
|
|
|
|
if (returnCode)
|
|
|
|
{
|
|
|
|
return { localeName.data() };
|
|
|
|
}
|
|
|
|
else
|
|
|
|
{
|
|
|
|
return { FALLBACK_LOCALE.data(), FALLBACK_LOCALE.size() };
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// Routine Description:
|
|
|
|
// - Retrieves the font family name out of the given object in the given locale.
|
|
|
|
// - If we can't find a valid name for the given locale, we'll fallback and report it back.
|
|
|
|
// Arguments:
|
|
|
|
// - fontFamily - DirectWrite font family object
|
|
|
|
// - localeName - The locale in which the name should be retrieved.
|
|
|
|
// - If fallback occurred, this is updated to what we retrieved instead.
|
|
|
|
// Return Value:
|
|
|
|
// - Localized string name of the font family
|
2019-09-04 00:14:44 +02:00
|
|
|
[[nodiscard]] std::wstring DxEngine::_GetFontFamilyName(gsl::not_null<IDWriteFontFamily*> const fontFamily,
|
2019-07-30 23:32:23 +02:00
|
|
|
std::wstring& localeName) const
|
|
|
|
{
|
|
|
|
// See: https://docs.microsoft.com/en-us/windows/win32/api/dwrite/nn-dwrite-idwritefontcollection
|
|
|
|
Microsoft::WRL::ComPtr<IDWriteLocalizedStrings> familyNames;
|
|
|
|
THROW_IF_FAILED(fontFamily->GetFamilyNames(&familyNames));
|
|
|
|
|
|
|
|
// First we have to find the right family name for the locale. We're going to bias toward what the caller
|
|
|
|
// requested, but fallback if we need to and reply with the locale we ended up choosing.
|
|
|
|
UINT32 index = 0;
|
|
|
|
BOOL exists = false;
|
|
|
|
|
|
|
|
// This returns S_OK whether or not it finds a locale name. Check exists field instead.
|
|
|
|
// If it returns an error, it's a real problem, not an absence of this locale name.
|
|
|
|
// https://docs.microsoft.com/en-us/windows/win32/api/dwrite/nf-dwrite-idwritelocalizedstrings-findlocalename
|
|
|
|
THROW_IF_FAILED(familyNames->FindLocaleName(localeName.data(), &index, &exists));
|
|
|
|
|
|
|
|
// If we tried and it still doesn't exist, try with the fallback locale.
|
|
|
|
if (!exists)
|
|
|
|
{
|
|
|
|
localeName = FALLBACK_LOCALE;
|
|
|
|
THROW_IF_FAILED(familyNames->FindLocaleName(localeName.data(), &index, &exists));
|
|
|
|
}
|
|
|
|
|
|
|
|
// If it still doesn't exist, we're going to try index 0.
|
|
|
|
if (!exists)
|
|
|
|
{
|
|
|
|
index = 0;
|
|
|
|
|
|
|
|
// Get the locale name out so at least the caller knows what locale this name goes with.
|
|
|
|
UINT32 length = 0;
|
|
|
|
THROW_IF_FAILED(familyNames->GetLocaleNameLength(index, &length));
|
2019-10-21 22:42:53 +02:00
|
|
|
localeName.resize(length);
|
2019-07-30 23:32:23 +02:00
|
|
|
|
|
|
|
// https://docs.microsoft.com/en-us/windows/win32/api/dwrite/nf-dwrite-idwritelocalizedstrings-getlocalenamelength
|
|
|
|
// https://docs.microsoft.com/en-us/windows/win32/api/dwrite/nf-dwrite-idwritelocalizedstrings-getlocalename
|
|
|
|
// GetLocaleNameLength does not include space for null terminator, but GetLocaleName needs it so add one.
|
2019-10-21 22:42:53 +02:00
|
|
|
THROW_IF_FAILED(familyNames->GetLocaleName(index, localeName.data(), length + 1));
|
2019-07-30 23:32:23 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
// OK, now that we've decided which family name and the locale that it's in... let's go get it.
|
|
|
|
UINT32 length = 0;
|
|
|
|
THROW_IF_FAILED(familyNames->GetStringLength(index, &length));
|
|
|
|
|
|
|
|
// Make our output buffer and resize it so it is allocated.
|
|
|
|
std::wstring retVal;
|
|
|
|
retVal.resize(length);
|
|
|
|
|
|
|
|
// FINALLY, go fetch the string name.
|
2019-10-21 22:42:53 +02:00
|
|
|
// https://docs.microsoft.com/en-us/windows/win32/api/dwrite/nf-dwrite-idwritelocalizedstrings-getstringlength
|
|
|
|
// https://docs.microsoft.com/en-us/windows/win32/api/dwrite/nf-dwrite-idwritelocalizedstrings-getstring
|
|
|
|
// Once again, GetStringLength is without the null, but GetString needs the null. So add one.
|
|
|
|
THROW_IF_FAILED(familyNames->GetString(index, retVal.data(), length + 1));
|
2019-07-30 23:32:23 +02:00
|
|
|
|
|
|
|
// and return it.
|
|
|
|
return retVal;
|
|
|
|
}
|
|
|
|
|
2019-05-03 00:29:04 +02:00
|
|
|
// Routine Description:
|
|
|
|
// - Updates the font used for drawing
|
|
|
|
// Arguments:
|
|
|
|
// - desired - Information specifying the font that is requested
|
|
|
|
// - actual - Filled with the nearest font actually chosen for drawing
|
|
|
|
// - dpi - The DPI of the screen
|
|
|
|
// Return Value:
|
|
|
|
// - S_OK or relevant DirectX error
|
2019-06-11 22:27:09 +02:00
|
|
|
[[nodiscard]] HRESULT DxEngine::_GetProposedFont(const FontInfoDesired& desired,
|
|
|
|
FontInfo& actual,
|
|
|
|
const int dpi,
|
2019-07-12 00:20:15 +02:00
|
|
|
Microsoft::WRL::ComPtr<IDWriteTextFormat>& textFormat,
|
2019-06-11 22:27:09 +02:00
|
|
|
Microsoft::WRL::ComPtr<IDWriteTextAnalyzer1>& textAnalyzer,
|
Refactor grid line renderers with support for more line types (#7107)
This is a refactoring of the grid line renderers, adjusting the line
widths to scale with the font size, and optimising the implementation to
cut down on the number of draw calls. It also extends the supported grid
line types to include true underlines and strike-through lines in the
style of the active font.
The main gist of the optimisation was to render the horizontal lines
with a single draw call, instead of a loop with lots of little strokes
joined together. In the case of the vertical lines, which still needed
to be handled in a loop, I've tried to move the majority of static
calculations outside the loop, so there is bit of optimisation there
too.
At the same time this code was updated to support a variable stroke
width for the lines, instead of having them hardcoded to 1 pixel. The
width is now calculated as a fraction of the font size (0.025 "em"),
which is still going to be 1 pixel wide in most typical usage, but will
scale up appropriately if you zoom in far enough.
And in preparation for supporting the SGR strike-through attribute, and
true underlines, I've extended the grid line renders with options for
handling those line types as well. The offset and thickness of the lines
is obtained from the font metrics (rounded to a pixel width, with a
minimum of one pixel), so they match the style of the font.
VALIDATION
For now we're still only rendering grid lines, and only the top and
bottom lines in the case of the DirectX renderer in Windows Terminal. So
to test, I hacked in some code to force the renderer to use all the
different options, confirming that they were working in both the GDI and
DirectX renderers.
I've tested the output with a number of different fonts, comparing it
with the same text rendered in WordPad. For the most part they match
exactly, but there can be slight differences when we adjust the font
size for grid alignment. And in the case of the GDI renderer, where
we're working with pixel heights rather than points, it's difficult to
match the sizes exactly.
This is a first step towards supporting the strike-through attribute
(#6205) and true underlines (#2915).
Closes #6911
2020-07-31 00:43:37 +02:00
|
|
|
Microsoft::WRL::ComPtr<IDWriteFontFace1>& fontFace,
|
|
|
|
LineMetrics& lineMetrics) const noexcept
|
2019-05-03 00:29:04 +02:00
|
|
|
{
|
|
|
|
try
|
|
|
|
{
|
2019-07-30 23:32:23 +02:00
|
|
|
std::wstring fontName(desired.GetFaceName());
|
2020-05-20 22:17:17 +02:00
|
|
|
DWRITE_FONT_WEIGHT weight = static_cast<DWRITE_FONT_WEIGHT>(desired.GetWeight());
|
2019-07-30 23:32:23 +02:00
|
|
|
DWRITE_FONT_STYLE style = DWRITE_FONT_STYLE_NORMAL;
|
|
|
|
DWRITE_FONT_STRETCH stretch = DWRITE_FONT_STRETCH_NORMAL;
|
|
|
|
std::wstring localeName = _GetLocaleName();
|
2019-05-03 00:29:04 +02:00
|
|
|
|
2020-03-16 17:19:47 +01:00
|
|
|
// _ResolveFontFaceWithFallback overrides the last argument with the locale name of the font,
|
|
|
|
// but we should use the system's locale to render the text.
|
|
|
|
std::wstring fontLocaleName = localeName;
|
|
|
|
const auto face = _ResolveFontFaceWithFallback(fontName, weight, stretch, style, fontLocaleName);
|
2019-05-03 00:29:04 +02:00
|
|
|
|
|
|
|
DWRITE_FONT_METRICS1 fontMetrics;
|
|
|
|
face->GetMetrics(&fontMetrics);
|
|
|
|
|
2020-03-02 20:21:07 +01:00
|
|
|
const UINT32 spaceCodePoint = L'M';
|
2019-05-03 00:29:04 +02:00
|
|
|
UINT16 spaceGlyphIndex;
|
|
|
|
THROW_IF_FAILED(face->GetGlyphIndicesW(&spaceCodePoint, 1, &spaceGlyphIndex));
|
|
|
|
|
|
|
|
INT32 advanceInDesignUnits;
|
|
|
|
THROW_IF_FAILED(face->GetDesignGlyphAdvances(1, &spaceGlyphIndex, &advanceInDesignUnits));
|
|
|
|
|
Scale box drawing glyphs to fit cells for visual bliss (#5743)
## Summary of the Pull Request
Identifies and scales glyphs in the box and line drawing ranges U+2500-U+259F to fit their cells.
## PR Checklist
* [x] Closes #455
* [x] I work here.
* [x] Manual tests. This is all graphical.
* [x] Metric ton of comments
* [x] Math spreadsheet included in PR.
* [x] Double check RTL glyphs.
* [x] Why is there the extra pixel?
* [x] Scrolling the mouse wheel check is done.
* [x] Not drawing outline?
* [x] Am core contributor. Roar.
* [x] Try suppressing negative scale factors and see if that gets rid of weird shading.
## Detailed Description of the Pull Request / Additional comments
### Background
- We want the Terminal to be fast at drawing. To be fast at drawing, we perform differential drawing, or only drawing what is different from the previous frame. We use DXGI's `Present1` method to help us with this as it helps us compose only the deltas onto the previous frame at drawing time and assists us in scrolling regions from the previous frame without intervention. However, it only works on strictly integer pixel row heights.
- Most of the hit testing and size-calculation logic in both the `conhost` and the Terminal products are based on the size of an individual cell. Historically, a cell was always dictated in a `COORD` structure, or two `SHORT` values... which are integers. As such, when we specify the space for any individual glyph to be displayed inside our terminal drawing region, we want it to fall perfectly inside of an integer box to ensure all these other algorithms work correctly and continue to do so.
- Finally, we want the Terminal to have font fallback and locate glyphs that aren't in the primary selected font from any other font it can find on the system that contains the glyph, per DirectWrite's font fallback mechanisms. These glyphs won't necessarily have the same font or glyph metrics as the base font, but we need them to fit inside the same cell dimensions as if they did because the hit testing and other algorithms aren't aware of which particular font is sourcing each glyph, just the dimensions of the bounding box per cell.
### How does Terminal deal with this?
- When we select a font, we perform some calculations using the design metrics of the font and glyphs to determine how we could fit them inside a cell with integer dimensions. Our process here is that we take the requested font size (which is generally a proxy for height), find the matching glyph width for that height then round it to an integer. We back convert from that now integer width to a height value which is almost certainly now a floating point number. But because we need an integer box value, we add line padding above and below the glyphs to ensure that the height is an integer as well as the width. Finally, we don't add the padding strictly equally. We attempt to align the English baseline of the glyph box directly onto an integer pixel multiple so most characters sit crisply on a line when displayed.
- Note that fonts and their glyphs have a prescribed baseline, line gap, and advance values. We use those as guidelines to get us started, but then to meet our requirements, we pad out from those. This results in fonts that should be properly authored showing gaps. It also results in fonts that are improperly authored looking even worse than they normally would.
### Now how does block and line drawing come in?
- Block and Line drawing glyphs are generally authored so they will look fine when the font and glyph metrics are followed exactly as prescribed by the font. (For some fonts, this still isn't true and we want them to look fine anyway.)
- When we add additional padding or rounding to make glyphs fit inside of a cell, we can be adding more space than was prescribed around these glyphs. This can cause a gap to be visible.
- Additionally, when we move things like baselines to land on a perfect integer pixel, we may be drawing a glyph lower in the bounding box than was prescribed originally.
### And how do we solve it?
- We identify all glyphs in the line and block drawing ranges.
- We find the bounding boxes of both the cell and the glyph.
- We compare the height of the glyph to the height of the cell to see if we need to scale. We prescribe a scale transform if the glyph wouldn't be tall enough to fit the box. (We leave it alone otherwise as some glyphs intentionally overscan the box and scaling them can cause banding effects.)
- We inspect the overhang/underhang above and below the boxes and translate transform them (slide them) so they cover the entire cell area.
- We repeat the previous two steps but in the horizontal direction.
## Validation Steps Performed
- See these comments:
- https://github.com/microsoft/terminal/issues/455#issuecomment-620248375
- https://github.com/microsoft/terminal/issues/455#issuecomment-621533916
- https://github.com/microsoft/terminal/issues/455#issuecomment-622585453
Also see the below one with more screenshots:
- https://github.com/microsoft/terminal/pull/5743#issuecomment-624940567
2020-05-08 23:09:32 +02:00
|
|
|
DWRITE_GLYPH_METRICS spaceMetrics = { 0 };
|
|
|
|
THROW_IF_FAILED(face->GetDesignGlyphMetrics(&spaceGlyphIndex, 1, &spaceMetrics));
|
|
|
|
|
2019-05-03 00:29:04 +02:00
|
|
|
// The math here is actually:
|
|
|
|
// Requested Size in Points * DPI scaling factor * Points to Pixels scaling factor.
|
|
|
|
// - DPI = dots per inch
|
|
|
|
// - PPI = points per inch or "points" as usually seen when choosing a font size
|
|
|
|
// - The DPI scaling factor is the current monitor DPI divided by 96, the default DPI.
|
|
|
|
// - The Points to Pixels factor is based on the typography definition of 72 points per inch.
|
|
|
|
// As such, converting requires taking the 96 pixel per inch default and dividing by the 72 points per inch
|
|
|
|
// to get a factor of 1 and 1/3.
|
|
|
|
// This turns into something like:
|
|
|
|
// - 12 ppi font * (96 dpi / 96 dpi) * (96 dpi / 72 points per inch) = 16 pixels tall font for 100% display (96 dpi is 100%)
|
|
|
|
// - 12 ppi font * (144 dpi / 96 dpi) * (96 dpi / 72 points per inch) = 24 pixels tall font for 150% display (144 dpi is 150%)
|
|
|
|
// - 12 ppi font * (192 dpi / 96 dpi) * (96 dpi / 72 points per inch) = 32 pixels tall font for 200% display (192 dpi is 200%)
|
|
|
|
float heightDesired = static_cast<float>(desired.GetEngineSize().Y) * static_cast<float>(USER_DEFAULT_SCREEN_DPI) / POINTS_PER_INCH;
|
|
|
|
|
|
|
|
// The advance is the number of pixels left-to-right (X dimension) for the given font.
|
|
|
|
// We're finding a proportional factor here with the design units in "ems", not an actual pixel measurement.
|
|
|
|
|
Adjusts High DPI scaling to enable differential rendering (#5345)
## Summary of the Pull Request
- Adjusts scaling practices in `DxEngine` (and related scaling practices in `TerminalControl`) for pixel-perfect row baselines and spacing at High DPI such that differential row-by-row rendering can be applied at High DPI.
## References
- #5185
## PR Checklist
* [x] Closes #5320, closes #3515, closes #1064
* [x] I work here.
* [x] Manually tested.
* [x] No doc.
* [x] Am core contributor. Also discussed with some of them already via Teams.
## Detailed Description of the Pull Request / Additional comments
**WAS:**
- We were using implicit DPI scaling on the `ID2D1RenderTarget` and running all of our processing in DIPs (Device-Independent Pixels). That's all well and good for getting things bootstrapped quickly, but it leaves the actual scaling of the draw commands up to the discretion of the rendering target.
- When we don't get to explicitly choose exactly how many pixels tall/wide and our X/Y placement perfectly, the nature of floating point multiplication and division required to do the presentation can cause us to drift off slightly out of our control depending on what the final display resolution actually is.
- Differential drawing cannot work unless we can know the exact integer pixels that need to be copied/moved/preserved/replaced between frames to give to the `IDXGISwapChain1::Present1` method. If things spill into fractional pixels or the sizes of rows/columns vary as they are rounded up and down implicitly, then we cannot do the differential rendering.
**NOW:**
- When deciding on a font, the `DxEngine` will take the scale factor into account and adjust the proposed height of the requested font. Then the remainder of the existing code that adjusts the baseline and integer-ifies each character cell will run naturally from there. That code already works correctly to align the height at normal DPI and scale out the font heights and advances to take an exact integer of pixels.
- `TermControl` has to use the scale now, in some places, and stop scaling in other places. This has to do with how the target's nature used to be implicit and is now explicit. For instance, determining where the cursor click hits must be scaled now. And determining the pixel size of the display canvas must no longer be scaled.
- `DxEngine` will no longer attempt to scale the invalid regions per my attempts in #5185 because the cell size is scaled. So it should work the same as at 96 DPI.
- The block is removed from the `DxEngine` that was causing a full invalidate on every frame at High DPI.
- A TODO was removed from `TermControl` that was invalidating everything when the DPI changed because the underlying renderer will already do that.
## Validation Steps Performed
* [x] Check at 150% DPI. Print text, scroll text down and up, do selection.
* [x] Check at 100% DPI. Print text, scroll text down and up, do selection.
* [x] Span two different DPI monitors and drag between them.
* [x] Giant pile of tests in https://github.com/microsoft/terminal/pull/5345#issuecomment-614127648
Co-authored-by: Dustin Howett <duhowett@microsoft.com>
Co-authored-by: Mike Griese <migrie@microsoft.com>
2020-04-22 23:59:51 +02:00
|
|
|
// Now we play trickery with the font size. Scale by the DPI to get the height we expect.
|
|
|
|
heightDesired *= (static_cast<float>(dpi) / static_cast<float>(USER_DEFAULT_SCREEN_DPI));
|
2019-05-03 00:29:04 +02:00
|
|
|
|
|
|
|
const float widthAdvance = static_cast<float>(advanceInDesignUnits) / fontMetrics.designUnitsPerEm;
|
|
|
|
|
|
|
|
// Use the real pixel height desired by the "em" factor for the width to get the number of pixels
|
|
|
|
// we will need per character in width. This will almost certainly result in fractional X-dimension pixels.
|
|
|
|
const float widthApprox = heightDesired * widthAdvance;
|
|
|
|
|
|
|
|
// Since we can't deal with columns of the presentation grid being fractional pixels in width, round to the nearest whole pixel.
|
|
|
|
const float widthExact = round(widthApprox);
|
|
|
|
|
|
|
|
// Now reverse the "em" factor from above to turn the exact pixel width into a (probably) fractional
|
|
|
|
// height in pixels of each character. It's easier for us to pad out height and align vertically
|
|
|
|
// than it is horizontally.
|
|
|
|
const auto fontSize = widthExact / widthAdvance;
|
|
|
|
|
|
|
|
// Now figure out the basic properties of the character height which include ascent and descent
|
|
|
|
// for this specific font size.
|
|
|
|
const float ascent = (fontSize * fontMetrics.ascent) / fontMetrics.designUnitsPerEm;
|
|
|
|
const float descent = (fontSize * fontMetrics.descent) / fontMetrics.designUnitsPerEm;
|
|
|
|
|
Scale box drawing glyphs to fit cells for visual bliss (#5743)
## Summary of the Pull Request
Identifies and scales glyphs in the box and line drawing ranges U+2500-U+259F to fit their cells.
## PR Checklist
* [x] Closes #455
* [x] I work here.
* [x] Manual tests. This is all graphical.
* [x] Metric ton of comments
* [x] Math spreadsheet included in PR.
* [x] Double check RTL glyphs.
* [x] Why is there the extra pixel?
* [x] Scrolling the mouse wheel check is done.
* [x] Not drawing outline?
* [x] Am core contributor. Roar.
* [x] Try suppressing negative scale factors and see if that gets rid of weird shading.
## Detailed Description of the Pull Request / Additional comments
### Background
- We want the Terminal to be fast at drawing. To be fast at drawing, we perform differential drawing, or only drawing what is different from the previous frame. We use DXGI's `Present1` method to help us with this as it helps us compose only the deltas onto the previous frame at drawing time and assists us in scrolling regions from the previous frame without intervention. However, it only works on strictly integer pixel row heights.
- Most of the hit testing and size-calculation logic in both the `conhost` and the Terminal products are based on the size of an individual cell. Historically, a cell was always dictated in a `COORD` structure, or two `SHORT` values... which are integers. As such, when we specify the space for any individual glyph to be displayed inside our terminal drawing region, we want it to fall perfectly inside of an integer box to ensure all these other algorithms work correctly and continue to do so.
- Finally, we want the Terminal to have font fallback and locate glyphs that aren't in the primary selected font from any other font it can find on the system that contains the glyph, per DirectWrite's font fallback mechanisms. These glyphs won't necessarily have the same font or glyph metrics as the base font, but we need them to fit inside the same cell dimensions as if they did because the hit testing and other algorithms aren't aware of which particular font is sourcing each glyph, just the dimensions of the bounding box per cell.
### How does Terminal deal with this?
- When we select a font, we perform some calculations using the design metrics of the font and glyphs to determine how we could fit them inside a cell with integer dimensions. Our process here is that we take the requested font size (which is generally a proxy for height), find the matching glyph width for that height then round it to an integer. We back convert from that now integer width to a height value which is almost certainly now a floating point number. But because we need an integer box value, we add line padding above and below the glyphs to ensure that the height is an integer as well as the width. Finally, we don't add the padding strictly equally. We attempt to align the English baseline of the glyph box directly onto an integer pixel multiple so most characters sit crisply on a line when displayed.
- Note that fonts and their glyphs have a prescribed baseline, line gap, and advance values. We use those as guidelines to get us started, but then to meet our requirements, we pad out from those. This results in fonts that should be properly authored showing gaps. It also results in fonts that are improperly authored looking even worse than they normally would.
### Now how does block and line drawing come in?
- Block and Line drawing glyphs are generally authored so they will look fine when the font and glyph metrics are followed exactly as prescribed by the font. (For some fonts, this still isn't true and we want them to look fine anyway.)
- When we add additional padding or rounding to make glyphs fit inside of a cell, we can be adding more space than was prescribed around these glyphs. This can cause a gap to be visible.
- Additionally, when we move things like baselines to land on a perfect integer pixel, we may be drawing a glyph lower in the bounding box than was prescribed originally.
### And how do we solve it?
- We identify all glyphs in the line and block drawing ranges.
- We find the bounding boxes of both the cell and the glyph.
- We compare the height of the glyph to the height of the cell to see if we need to scale. We prescribe a scale transform if the glyph wouldn't be tall enough to fit the box. (We leave it alone otherwise as some glyphs intentionally overscan the box and scaling them can cause banding effects.)
- We inspect the overhang/underhang above and below the boxes and translate transform them (slide them) so they cover the entire cell area.
- We repeat the previous two steps but in the horizontal direction.
## Validation Steps Performed
- See these comments:
- https://github.com/microsoft/terminal/issues/455#issuecomment-620248375
- https://github.com/microsoft/terminal/issues/455#issuecomment-621533916
- https://github.com/microsoft/terminal/issues/455#issuecomment-622585453
Also see the below one with more screenshots:
- https://github.com/microsoft/terminal/pull/5743#issuecomment-624940567
2020-05-08 23:09:32 +02:00
|
|
|
// Get the gap.
|
|
|
|
const float gap = (fontSize * fontMetrics.lineGap) / fontMetrics.designUnitsPerEm;
|
|
|
|
const float halfGap = gap / 2;
|
|
|
|
|
2019-05-03 00:29:04 +02:00
|
|
|
// We're going to build a line spacing object here to track all of this data in our format.
|
|
|
|
DWRITE_LINE_SPACING lineSpacing = {};
|
|
|
|
lineSpacing.method = DWRITE_LINE_SPACING_METHOD_UNIFORM;
|
|
|
|
|
|
|
|
// We need to make sure the baseline falls on a round pixel (not a fractional pixel).
|
|
|
|
// If the baseline is fractional, the text appears blurry, especially at small scales.
|
|
|
|
// Since we also need to make sure the bounding box as a whole is round pixels
|
|
|
|
// (because the entire console system maths in full cell units),
|
|
|
|
// we're just going to ceiling up the ascent and descent to make a full pixel amount
|
|
|
|
// and set the baseline to the full round pixel ascent value.
|
|
|
|
//
|
|
|
|
// For reference, for the letters "ag":
|
Scale box drawing glyphs to fit cells for visual bliss (#5743)
## Summary of the Pull Request
Identifies and scales glyphs in the box and line drawing ranges U+2500-U+259F to fit their cells.
## PR Checklist
* [x] Closes #455
* [x] I work here.
* [x] Manual tests. This is all graphical.
* [x] Metric ton of comments
* [x] Math spreadsheet included in PR.
* [x] Double check RTL glyphs.
* [x] Why is there the extra pixel?
* [x] Scrolling the mouse wheel check is done.
* [x] Not drawing outline?
* [x] Am core contributor. Roar.
* [x] Try suppressing negative scale factors and see if that gets rid of weird shading.
## Detailed Description of the Pull Request / Additional comments
### Background
- We want the Terminal to be fast at drawing. To be fast at drawing, we perform differential drawing, or only drawing what is different from the previous frame. We use DXGI's `Present1` method to help us with this as it helps us compose only the deltas onto the previous frame at drawing time and assists us in scrolling regions from the previous frame without intervention. However, it only works on strictly integer pixel row heights.
- Most of the hit testing and size-calculation logic in both the `conhost` and the Terminal products are based on the size of an individual cell. Historically, a cell was always dictated in a `COORD` structure, or two `SHORT` values... which are integers. As such, when we specify the space for any individual glyph to be displayed inside our terminal drawing region, we want it to fall perfectly inside of an integer box to ensure all these other algorithms work correctly and continue to do so.
- Finally, we want the Terminal to have font fallback and locate glyphs that aren't in the primary selected font from any other font it can find on the system that contains the glyph, per DirectWrite's font fallback mechanisms. These glyphs won't necessarily have the same font or glyph metrics as the base font, but we need them to fit inside the same cell dimensions as if they did because the hit testing and other algorithms aren't aware of which particular font is sourcing each glyph, just the dimensions of the bounding box per cell.
### How does Terminal deal with this?
- When we select a font, we perform some calculations using the design metrics of the font and glyphs to determine how we could fit them inside a cell with integer dimensions. Our process here is that we take the requested font size (which is generally a proxy for height), find the matching glyph width for that height then round it to an integer. We back convert from that now integer width to a height value which is almost certainly now a floating point number. But because we need an integer box value, we add line padding above and below the glyphs to ensure that the height is an integer as well as the width. Finally, we don't add the padding strictly equally. We attempt to align the English baseline of the glyph box directly onto an integer pixel multiple so most characters sit crisply on a line when displayed.
- Note that fonts and their glyphs have a prescribed baseline, line gap, and advance values. We use those as guidelines to get us started, but then to meet our requirements, we pad out from those. This results in fonts that should be properly authored showing gaps. It also results in fonts that are improperly authored looking even worse than they normally would.
### Now how does block and line drawing come in?
- Block and Line drawing glyphs are generally authored so they will look fine when the font and glyph metrics are followed exactly as prescribed by the font. (For some fonts, this still isn't true and we want them to look fine anyway.)
- When we add additional padding or rounding to make glyphs fit inside of a cell, we can be adding more space than was prescribed around these glyphs. This can cause a gap to be visible.
- Additionally, when we move things like baselines to land on a perfect integer pixel, we may be drawing a glyph lower in the bounding box than was prescribed originally.
### And how do we solve it?
- We identify all glyphs in the line and block drawing ranges.
- We find the bounding boxes of both the cell and the glyph.
- We compare the height of the glyph to the height of the cell to see if we need to scale. We prescribe a scale transform if the glyph wouldn't be tall enough to fit the box. (We leave it alone otherwise as some glyphs intentionally overscan the box and scaling them can cause banding effects.)
- We inspect the overhang/underhang above and below the boxes and translate transform them (slide them) so they cover the entire cell area.
- We repeat the previous two steps but in the horizontal direction.
## Validation Steps Performed
- See these comments:
- https://github.com/microsoft/terminal/issues/455#issuecomment-620248375
- https://github.com/microsoft/terminal/issues/455#issuecomment-621533916
- https://github.com/microsoft/terminal/issues/455#issuecomment-622585453
Also see the below one with more screenshots:
- https://github.com/microsoft/terminal/pull/5743#issuecomment-624940567
2020-05-08 23:09:32 +02:00
|
|
|
// ...
|
|
|
|
// gggggg bottom of previous line
|
2019-05-03 00:29:04 +02:00
|
|
|
//
|
Scale box drawing glyphs to fit cells for visual bliss (#5743)
## Summary of the Pull Request
Identifies and scales glyphs in the box and line drawing ranges U+2500-U+259F to fit their cells.
## PR Checklist
* [x] Closes #455
* [x] I work here.
* [x] Manual tests. This is all graphical.
* [x] Metric ton of comments
* [x] Math spreadsheet included in PR.
* [x] Double check RTL glyphs.
* [x] Why is there the extra pixel?
* [x] Scrolling the mouse wheel check is done.
* [x] Not drawing outline?
* [x] Am core contributor. Roar.
* [x] Try suppressing negative scale factors and see if that gets rid of weird shading.
## Detailed Description of the Pull Request / Additional comments
### Background
- We want the Terminal to be fast at drawing. To be fast at drawing, we perform differential drawing, or only drawing what is different from the previous frame. We use DXGI's `Present1` method to help us with this as it helps us compose only the deltas onto the previous frame at drawing time and assists us in scrolling regions from the previous frame without intervention. However, it only works on strictly integer pixel row heights.
- Most of the hit testing and size-calculation logic in both the `conhost` and the Terminal products are based on the size of an individual cell. Historically, a cell was always dictated in a `COORD` structure, or two `SHORT` values... which are integers. As such, when we specify the space for any individual glyph to be displayed inside our terminal drawing region, we want it to fall perfectly inside of an integer box to ensure all these other algorithms work correctly and continue to do so.
- Finally, we want the Terminal to have font fallback and locate glyphs that aren't in the primary selected font from any other font it can find on the system that contains the glyph, per DirectWrite's font fallback mechanisms. These glyphs won't necessarily have the same font or glyph metrics as the base font, but we need them to fit inside the same cell dimensions as if they did because the hit testing and other algorithms aren't aware of which particular font is sourcing each glyph, just the dimensions of the bounding box per cell.
### How does Terminal deal with this?
- When we select a font, we perform some calculations using the design metrics of the font and glyphs to determine how we could fit them inside a cell with integer dimensions. Our process here is that we take the requested font size (which is generally a proxy for height), find the matching glyph width for that height then round it to an integer. We back convert from that now integer width to a height value which is almost certainly now a floating point number. But because we need an integer box value, we add line padding above and below the glyphs to ensure that the height is an integer as well as the width. Finally, we don't add the padding strictly equally. We attempt to align the English baseline of the glyph box directly onto an integer pixel multiple so most characters sit crisply on a line when displayed.
- Note that fonts and their glyphs have a prescribed baseline, line gap, and advance values. We use those as guidelines to get us started, but then to meet our requirements, we pad out from those. This results in fonts that should be properly authored showing gaps. It also results in fonts that are improperly authored looking even worse than they normally would.
### Now how does block and line drawing come in?
- Block and Line drawing glyphs are generally authored so they will look fine when the font and glyph metrics are followed exactly as prescribed by the font. (For some fonts, this still isn't true and we want them to look fine anyway.)
- When we add additional padding or rounding to make glyphs fit inside of a cell, we can be adding more space than was prescribed around these glyphs. This can cause a gap to be visible.
- Additionally, when we move things like baselines to land on a perfect integer pixel, we may be drawing a glyph lower in the bounding box than was prescribed originally.
### And how do we solve it?
- We identify all glyphs in the line and block drawing ranges.
- We find the bounding boxes of both the cell and the glyph.
- We compare the height of the glyph to the height of the cell to see if we need to scale. We prescribe a scale transform if the glyph wouldn't be tall enough to fit the box. (We leave it alone otherwise as some glyphs intentionally overscan the box and scaling them can cause banding effects.)
- We inspect the overhang/underhang above and below the boxes and translate transform them (slide them) so they cover the entire cell area.
- We repeat the previous two steps but in the horizontal direction.
## Validation Steps Performed
- See these comments:
- https://github.com/microsoft/terminal/issues/455#issuecomment-620248375
- https://github.com/microsoft/terminal/issues/455#issuecomment-621533916
- https://github.com/microsoft/terminal/issues/455#issuecomment-622585453
Also see the below one with more screenshots:
- https://github.com/microsoft/terminal/pull/5743#issuecomment-624940567
2020-05-08 23:09:32 +02:00
|
|
|
// ----------------- <===========================================|
|
|
|
|
// | topSideBearing | 1/2 lineGap |
|
|
|
|
// aaaaaa ggggggg <-------------------------|-------------| |
|
|
|
|
// a g g | | |
|
|
|
|
// aaaaa ggggg |<-ascent | |
|
|
|
|
// a a g | | |---- lineHeight
|
|
|
|
// aaaaa a gggggg <----baseline, verticalOriginY----------|---|
|
|
|
|
// g g |<-descent | |
|
|
|
|
// gggggg <-------------------------|-------------| |
|
|
|
|
// | bottomSideBearing | 1/2 lineGap |
|
|
|
|
// ----------------- <===========================================|
|
|
|
|
//
|
|
|
|
// aaaaaa ggggggg top of next line
|
|
|
|
// ...
|
|
|
|
//
|
|
|
|
// Also note...
|
|
|
|
// We're going to add half the line gap to the ascent and half the line gap to the descent
|
|
|
|
// to ensure that the spacing is balanced vertically.
|
|
|
|
// Generally speaking, the line gap is added to the ascent by DirectWrite itself for
|
|
|
|
// horizontally drawn text which can place the baseline and glyphs "lower" in the drawing
|
|
|
|
// box than would be desired for proper alignment of things like line and box characters
|
|
|
|
// which will try to sit centered in the area and touch perfectly with their neighbors.
|
|
|
|
|
|
|
|
const auto fullPixelAscent = ceil(ascent + halfGap);
|
|
|
|
const auto fullPixelDescent = ceil(descent + halfGap);
|
2019-05-03 00:29:04 +02:00
|
|
|
lineSpacing.height = fullPixelAscent + fullPixelDescent;
|
|
|
|
lineSpacing.baseline = fullPixelAscent;
|
|
|
|
|
Scale box drawing glyphs to fit cells for visual bliss (#5743)
## Summary of the Pull Request
Identifies and scales glyphs in the box and line drawing ranges U+2500-U+259F to fit their cells.
## PR Checklist
* [x] Closes #455
* [x] I work here.
* [x] Manual tests. This is all graphical.
* [x] Metric ton of comments
* [x] Math spreadsheet included in PR.
* [x] Double check RTL glyphs.
* [x] Why is there the extra pixel?
* [x] Scrolling the mouse wheel check is done.
* [x] Not drawing outline?
* [x] Am core contributor. Roar.
* [x] Try suppressing negative scale factors and see if that gets rid of weird shading.
## Detailed Description of the Pull Request / Additional comments
### Background
- We want the Terminal to be fast at drawing. To be fast at drawing, we perform differential drawing, or only drawing what is different from the previous frame. We use DXGI's `Present1` method to help us with this as it helps us compose only the deltas onto the previous frame at drawing time and assists us in scrolling regions from the previous frame without intervention. However, it only works on strictly integer pixel row heights.
- Most of the hit testing and size-calculation logic in both the `conhost` and the Terminal products are based on the size of an individual cell. Historically, a cell was always dictated in a `COORD` structure, or two `SHORT` values... which are integers. As such, when we specify the space for any individual glyph to be displayed inside our terminal drawing region, we want it to fall perfectly inside of an integer box to ensure all these other algorithms work correctly and continue to do so.
- Finally, we want the Terminal to have font fallback and locate glyphs that aren't in the primary selected font from any other font it can find on the system that contains the glyph, per DirectWrite's font fallback mechanisms. These glyphs won't necessarily have the same font or glyph metrics as the base font, but we need them to fit inside the same cell dimensions as if they did because the hit testing and other algorithms aren't aware of which particular font is sourcing each glyph, just the dimensions of the bounding box per cell.
### How does Terminal deal with this?
- When we select a font, we perform some calculations using the design metrics of the font and glyphs to determine how we could fit them inside a cell with integer dimensions. Our process here is that we take the requested font size (which is generally a proxy for height), find the matching glyph width for that height then round it to an integer. We back convert from that now integer width to a height value which is almost certainly now a floating point number. But because we need an integer box value, we add line padding above and below the glyphs to ensure that the height is an integer as well as the width. Finally, we don't add the padding strictly equally. We attempt to align the English baseline of the glyph box directly onto an integer pixel multiple so most characters sit crisply on a line when displayed.
- Note that fonts and their glyphs have a prescribed baseline, line gap, and advance values. We use those as guidelines to get us started, but then to meet our requirements, we pad out from those. This results in fonts that should be properly authored showing gaps. It also results in fonts that are improperly authored looking even worse than they normally would.
### Now how does block and line drawing come in?
- Block and Line drawing glyphs are generally authored so they will look fine when the font and glyph metrics are followed exactly as prescribed by the font. (For some fonts, this still isn't true and we want them to look fine anyway.)
- When we add additional padding or rounding to make glyphs fit inside of a cell, we can be adding more space than was prescribed around these glyphs. This can cause a gap to be visible.
- Additionally, when we move things like baselines to land on a perfect integer pixel, we may be drawing a glyph lower in the bounding box than was prescribed originally.
### And how do we solve it?
- We identify all glyphs in the line and block drawing ranges.
- We find the bounding boxes of both the cell and the glyph.
- We compare the height of the glyph to the height of the cell to see if we need to scale. We prescribe a scale transform if the glyph wouldn't be tall enough to fit the box. (We leave it alone otherwise as some glyphs intentionally overscan the box and scaling them can cause banding effects.)
- We inspect the overhang/underhang above and below the boxes and translate transform them (slide them) so they cover the entire cell area.
- We repeat the previous two steps but in the horizontal direction.
## Validation Steps Performed
- See these comments:
- https://github.com/microsoft/terminal/issues/455#issuecomment-620248375
- https://github.com/microsoft/terminal/issues/455#issuecomment-621533916
- https://github.com/microsoft/terminal/issues/455#issuecomment-622585453
Also see the below one with more screenshots:
- https://github.com/microsoft/terminal/pull/5743#issuecomment-624940567
2020-05-08 23:09:32 +02:00
|
|
|
// According to MSDN (https://docs.microsoft.com/en-us/windows/win32/api/dwrite_3/ne-dwrite_3-dwrite_font_line_gap_usage)
|
|
|
|
// Setting "ENABLED" means we've included the line gapping in the spacing numbers given.
|
|
|
|
lineSpacing.fontLineGapUsage = DWRITE_FONT_LINE_GAP_USAGE_ENABLED;
|
|
|
|
|
2019-05-03 00:29:04 +02:00
|
|
|
// Create the font with the fractional pixel height size.
|
|
|
|
// It should have an integer pixel width by our math above.
|
|
|
|
// Then below, apply the line spacing to the format to position the floating point pixel height characters
|
|
|
|
// into a cell that has an integer pixel height leaving some padding above/below as necessary to round them out.
|
|
|
|
Microsoft::WRL::ComPtr<IDWriteTextFormat> format;
|
|
|
|
THROW_IF_FAILED(_dwriteFactory->CreateTextFormat(fontName.data(),
|
|
|
|
nullptr,
|
|
|
|
weight,
|
|
|
|
style,
|
|
|
|
stretch,
|
|
|
|
fontSize,
|
2019-07-30 23:32:23 +02:00
|
|
|
localeName.data(),
|
2019-05-03 00:29:04 +02:00
|
|
|
&format));
|
|
|
|
|
|
|
|
THROW_IF_FAILED(format.As(&textFormat));
|
|
|
|
|
|
|
|
Microsoft::WRL::ComPtr<IDWriteTextAnalyzer> analyzer;
|
|
|
|
THROW_IF_FAILED(_dwriteFactory->CreateTextAnalyzer(&analyzer));
|
|
|
|
THROW_IF_FAILED(analyzer.As(&textAnalyzer));
|
|
|
|
|
|
|
|
fontFace = face;
|
|
|
|
|
2019-07-12 00:20:15 +02:00
|
|
|
THROW_IF_FAILED(textFormat->SetLineSpacing(lineSpacing.method, lineSpacing.height, lineSpacing.baseline));
|
2019-05-03 00:29:04 +02:00
|
|
|
THROW_IF_FAILED(textFormat->SetParagraphAlignment(DWRITE_PARAGRAPH_ALIGNMENT_NEAR));
|
|
|
|
THROW_IF_FAILED(textFormat->SetWordWrapping(DWRITE_WORD_WRAPPING_NO_WRAP));
|
|
|
|
|
|
|
|
// The scaled size needs to represent the pixel box that each character will fit within for the purposes
|
|
|
|
// of hit testing math and other such multiplication/division.
|
|
|
|
COORD coordSize = { 0 };
|
|
|
|
coordSize.X = gsl::narrow<SHORT>(widthExact);
|
Scale box drawing glyphs to fit cells for visual bliss (#5743)
## Summary of the Pull Request
Identifies and scales glyphs in the box and line drawing ranges U+2500-U+259F to fit their cells.
## PR Checklist
* [x] Closes #455
* [x] I work here.
* [x] Manual tests. This is all graphical.
* [x] Metric ton of comments
* [x] Math spreadsheet included in PR.
* [x] Double check RTL glyphs.
* [x] Why is there the extra pixel?
* [x] Scrolling the mouse wheel check is done.
* [x] Not drawing outline?
* [x] Am core contributor. Roar.
* [x] Try suppressing negative scale factors and see if that gets rid of weird shading.
## Detailed Description of the Pull Request / Additional comments
### Background
- We want the Terminal to be fast at drawing. To be fast at drawing, we perform differential drawing, or only drawing what is different from the previous frame. We use DXGI's `Present1` method to help us with this as it helps us compose only the deltas onto the previous frame at drawing time and assists us in scrolling regions from the previous frame without intervention. However, it only works on strictly integer pixel row heights.
- Most of the hit testing and size-calculation logic in both the `conhost` and the Terminal products are based on the size of an individual cell. Historically, a cell was always dictated in a `COORD` structure, or two `SHORT` values... which are integers. As such, when we specify the space for any individual glyph to be displayed inside our terminal drawing region, we want it to fall perfectly inside of an integer box to ensure all these other algorithms work correctly and continue to do so.
- Finally, we want the Terminal to have font fallback and locate glyphs that aren't in the primary selected font from any other font it can find on the system that contains the glyph, per DirectWrite's font fallback mechanisms. These glyphs won't necessarily have the same font or glyph metrics as the base font, but we need them to fit inside the same cell dimensions as if they did because the hit testing and other algorithms aren't aware of which particular font is sourcing each glyph, just the dimensions of the bounding box per cell.
### How does Terminal deal with this?
- When we select a font, we perform some calculations using the design metrics of the font and glyphs to determine how we could fit them inside a cell with integer dimensions. Our process here is that we take the requested font size (which is generally a proxy for height), find the matching glyph width for that height then round it to an integer. We back convert from that now integer width to a height value which is almost certainly now a floating point number. But because we need an integer box value, we add line padding above and below the glyphs to ensure that the height is an integer as well as the width. Finally, we don't add the padding strictly equally. We attempt to align the English baseline of the glyph box directly onto an integer pixel multiple so most characters sit crisply on a line when displayed.
- Note that fonts and their glyphs have a prescribed baseline, line gap, and advance values. We use those as guidelines to get us started, but then to meet our requirements, we pad out from those. This results in fonts that should be properly authored showing gaps. It also results in fonts that are improperly authored looking even worse than they normally would.
### Now how does block and line drawing come in?
- Block and Line drawing glyphs are generally authored so they will look fine when the font and glyph metrics are followed exactly as prescribed by the font. (For some fonts, this still isn't true and we want them to look fine anyway.)
- When we add additional padding or rounding to make glyphs fit inside of a cell, we can be adding more space than was prescribed around these glyphs. This can cause a gap to be visible.
- Additionally, when we move things like baselines to land on a perfect integer pixel, we may be drawing a glyph lower in the bounding box than was prescribed originally.
### And how do we solve it?
- We identify all glyphs in the line and block drawing ranges.
- We find the bounding boxes of both the cell and the glyph.
- We compare the height of the glyph to the height of the cell to see if we need to scale. We prescribe a scale transform if the glyph wouldn't be tall enough to fit the box. (We leave it alone otherwise as some glyphs intentionally overscan the box and scaling them can cause banding effects.)
- We inspect the overhang/underhang above and below the boxes and translate transform them (slide them) so they cover the entire cell area.
- We repeat the previous two steps but in the horizontal direction.
## Validation Steps Performed
- See these comments:
- https://github.com/microsoft/terminal/issues/455#issuecomment-620248375
- https://github.com/microsoft/terminal/issues/455#issuecomment-621533916
- https://github.com/microsoft/terminal/issues/455#issuecomment-622585453
Also see the below one with more screenshots:
- https://github.com/microsoft/terminal/pull/5743#issuecomment-624940567
2020-05-08 23:09:32 +02:00
|
|
|
coordSize.Y = gsl::narrow_cast<SHORT>(lineSpacing.height);
|
2019-05-03 00:29:04 +02:00
|
|
|
|
|
|
|
// Unscaled is for the purposes of re-communicating this font back to the renderer again later.
|
|
|
|
// As such, we need to give the same original size parameter back here without padding
|
|
|
|
// or rounding or scaling manipulation.
|
2019-08-29 20:27:39 +02:00
|
|
|
const COORD unscaled = desired.GetEngineSize();
|
2019-05-03 00:29:04 +02:00
|
|
|
|
2019-08-29 20:27:39 +02:00
|
|
|
const COORD scaled = coordSize;
|
2019-05-03 00:29:04 +02:00
|
|
|
|
Allow FontInfo{,Base,Desired} to store a font name > 32 wch (#3107)
We now truncate the font name as it goes out to GDI APIs, in console API
servicing, and in the propsheet.
I attempted to defer truncating the font to as far up the stack as
possible, so as to make FontInfo usable for the broadest set of cases.
There were a couple questions that came up: I know that `Settings` gets
memset (memsat?) by the registry deserializer, and perhaps that's
another place for us to tackle. Right now, this pull request enables
fonts whose names are >= 32 characters _in Windows Terminal only_, but
the underpinnings are there for conhost as well. We'd need to explicitly
break at the API, or perhaps return a failure or log something to
telemetry.
* Should we log truncation at the API boundary to telemetry?
-> Later; followup filed (#3123)
* Should we fix Settings here, or later?
-> Later; followup filed (#3123)
* `TrueTypeFontList` is built out of things in winconp, the private
console header. Concern about interop structures.
-> Not used for interop, followup filed to clean it up (#3123)
* Is `unsigned int` right for codepage? For width?
-> Yes: codepage became UINT (from WORD) when we moved from Win16 to
Win32
This commit also includes a workaround for #3170. Growing
CONSOLE_INFORMATION made us lose the struct layout lottery during
release builds, and this was an expedient fix.
Closes #602.
Related to #3123.
2019-10-15 06:23:45 +02:00
|
|
|
actual.SetFromEngine(fontName,
|
2019-05-03 00:29:04 +02:00
|
|
|
desired.GetFamily(),
|
Allow FontInfo{,Base,Desired} to store a font name > 32 wch (#3107)
We now truncate the font name as it goes out to GDI APIs, in console API
servicing, and in the propsheet.
I attempted to defer truncating the font to as far up the stack as
possible, so as to make FontInfo usable for the broadest set of cases.
There were a couple questions that came up: I know that `Settings` gets
memset (memsat?) by the registry deserializer, and perhaps that's
another place for us to tackle. Right now, this pull request enables
fonts whose names are >= 32 characters _in Windows Terminal only_, but
the underpinnings are there for conhost as well. We'd need to explicitly
break at the API, or perhaps return a failure or log something to
telemetry.
* Should we log truncation at the API boundary to telemetry?
-> Later; followup filed (#3123)
* Should we fix Settings here, or later?
-> Later; followup filed (#3123)
* `TrueTypeFontList` is built out of things in winconp, the private
console header. Concern about interop structures.
-> Not used for interop, followup filed to clean it up (#3123)
* Is `unsigned int` right for codepage? For width?
-> Yes: codepage became UINT (from WORD) when we moved from Win16 to
Win32
This commit also includes a workaround for #3170. Growing
CONSOLE_INFORMATION made us lose the struct layout lottery during
release builds, and this was an expedient fix.
Closes #602.
Related to #3123.
2019-10-15 06:23:45 +02:00
|
|
|
textFormat->GetFontWeight(),
|
2019-05-03 00:29:04 +02:00
|
|
|
false,
|
|
|
|
scaled,
|
|
|
|
unscaled);
|
Refactor grid line renderers with support for more line types (#7107)
This is a refactoring of the grid line renderers, adjusting the line
widths to scale with the font size, and optimising the implementation to
cut down on the number of draw calls. It also extends the supported grid
line types to include true underlines and strike-through lines in the
style of the active font.
The main gist of the optimisation was to render the horizontal lines
with a single draw call, instead of a loop with lots of little strokes
joined together. In the case of the vertical lines, which still needed
to be handled in a loop, I've tried to move the majority of static
calculations outside the loop, so there is bit of optimisation there
too.
At the same time this code was updated to support a variable stroke
width for the lines, instead of having them hardcoded to 1 pixel. The
width is now calculated as a fraction of the font size (0.025 "em"),
which is still going to be 1 pixel wide in most typical usage, but will
scale up appropriately if you zoom in far enough.
And in preparation for supporting the SGR strike-through attribute, and
true underlines, I've extended the grid line renderers with options for
handling those line types as well. The offset and thickness of the lines
is obtained from the font metrics (rounded to a pixel width, with a
minimum of one pixel), so they match the style of the font.
VALIDATION
For now we're still only rendering grid lines, and only the top and
bottom lines in the case of the DirectX renderer in Windows Terminal. So
to test, I hacked in some code to force the renderer to use all the
different options, confirming that they were working in both the GDI and
DirectX renderers.
I've tested the output with a number of different fonts, comparing it
with the same text rendered in WordPad. For the most part they match
exactly, but there can be slight differences when we adjust the font
size for grid alignment. And in the case of the GDI renderer, where
we're working with pixel heights rather than points, it's difficult to
match the sizes exactly.
This is a first step towards supporting the strike-through attribute
(#6205) and true underlines (#2915).
Closes #6911
2020-07-31 00:43:37 +02:00
|
|
|
|
|
|
|
// There is no font metric for the grid line width, so we use a small
|
|
|
|
// multiple of the font size, which typically rounds to a pixel.
|
|
|
|
lineMetrics.gridlineWidth = std::round(fontSize * 0.025f);
|
|
|
|
|
|
|
|
// All other line metrics are in design units, so to get a pixel value,
|
|
|
|
// we scale by the font size divided by the design-units-per-em.
|
|
|
|
const auto scale = fontSize / fontMetrics.designUnitsPerEm;
|
|
|
|
lineMetrics.underlineOffset = std::round(fontMetrics.underlinePosition * scale);
|
|
|
|
lineMetrics.underlineWidth = std::round(fontMetrics.underlineThickness * scale);
|
|
|
|
lineMetrics.strikethroughOffset = std::round(fontMetrics.strikethroughPosition * scale);
|
|
|
|
lineMetrics.strikethroughWidth = std::round(fontMetrics.strikethroughThickness * scale);
|
|
|
|
|
|
|
|
// We always want the lines to be visible, so if a stroke width ends up
|
|
|
|
// at zero after rounding, we need to make it at least 1 pixel.
|
|
|
|
lineMetrics.gridlineWidth = std::max(lineMetrics.gridlineWidth, 1.0f);
|
|
|
|
lineMetrics.underlineWidth = std::max(lineMetrics.underlineWidth, 1.0f);
|
|
|
|
lineMetrics.strikethroughWidth = std::max(lineMetrics.strikethroughWidth, 1.0f);
|
|
|
|
|
|
|
|
// Offsets are relative to the base line of the font, so we subtract
|
|
|
|
// from the ascent to get an offset relative to the top of the cell.
|
|
|
|
lineMetrics.underlineOffset = fullPixelAscent - lineMetrics.underlineOffset;
|
|
|
|
lineMetrics.strikethroughOffset = fullPixelAscent - lineMetrics.strikethroughOffset;
|
|
|
|
|
Add support for the "doubly underlined" graphic rendition attribute (#7223)
This PR adds support for the ANSI _doubly underlined_ graphic rendition
attribute, which is enabled by the `SGR 21` escape sequence.
There was already an `ExtendedAttributes::DoublyUnderlined` flag in the
`TextAttribute` class, but I needed to add `SetDoublyUnderlined` and
`IsDoublyUnderlined` methods to access that flag, and update the
`SetGraphicsRendition` methods of the two dispatchers to set the
attribute on receipt of the `SGR 21` sequence. I also had to update the
existing `SGR 24` handler to reset _DoublyUnderlined_ in addition to
_Underlined_, since they share the same reset sequence.
For the rendering, I've added a new grid line type, which essentially
just draws an additional line with the same thickness as the regular
underline, but slightly below it - I found a gap of around 0.05 "em"
between the lines looked best. If there isn't enough space in the cell
for that gap, the second line will be clamped to overlap the first, so
you then just get a thicker line. If there isn't even enough space below
for a thicker line, we move the offset _above_ the first line, but just
enough to make it thicker.
The only other complication was the update of the `Xterm256Engine` in
the VT renderer. As mentioned above, the two underline attributes share
the same reset sequence, so to forward that state over conpty we require
a slightly more complicated process than with most other attributes
(similar to _Bold_ and _Faint_). We first check whether either underline
attribute needs to be turned off to send the reset sequence, and then
check individually if each of them needs to be turned back on again.
## Validation Steps Performed
For testing, I've extended the existing attribute tests in
`AdapterTest`, `VTRendererTest`, and `ScreenBufferTests`, to make sure
we're covering both the _Underlined_ and _DoublyUnderlined_ attributes.
I've also manually tested the `SGR 21` sequence in conhost and Windows
Terminal, with a variety of fonts and font sizes, to make sure the
rendering was reasonably distinguishable from a single underline.
Closes #2916
2020-08-10 19:06:16 +02:00
|
|
|
// For double underlines we need a second offset, just below the first,
|
|
|
|
// but with a bit of a gap (about double the grid line width).
|
|
|
|
lineMetrics.underlineOffset2 = lineMetrics.underlineOffset +
|
|
|
|
lineMetrics.underlineWidth +
|
|
|
|
std::round(fontSize * 0.05f);
|
|
|
|
|
|
|
|
// However, we don't want the underline to extend past the bottom of the
|
|
|
|
// cell, so we clamp the offset to fit just inside.
|
|
|
|
const auto maxUnderlineOffset = lineSpacing.height - _lineMetrics.underlineWidth;
|
|
|
|
lineMetrics.underlineOffset2 = std::min(lineMetrics.underlineOffset2, maxUnderlineOffset);
|
|
|
|
|
|
|
|
// But if the resulting gap isn't big enough even to register as a thicker
|
|
|
|
// line, it's better to place the second line slightly above the first.
|
|
|
|
if (lineMetrics.underlineOffset2 < lineMetrics.underlineOffset + lineMetrics.gridlineWidth)
|
|
|
|
{
|
|
|
|
lineMetrics.underlineOffset2 = lineMetrics.underlineOffset - lineMetrics.gridlineWidth;
|
|
|
|
}
|
|
|
|
|
|
|
|
// We also add half the stroke width to the offsets, since the line
|
Refactor grid line renderers with support for more line types (#7107)
This is a refactoring of the grid line renderers, adjusting the line
widths to scale with the font size, and optimising the implementation to
cut down on the number of draw calls. It also extends the supported grid
line types to include true underlines and strike-through lines in the
style of the active font.
The main gist of the optimisation was to render the horizontal lines
with a single draw call, instead of a loop with lots of little strokes
joined together. In the case of the vertical lines, which still needed
to be handled in a loop, I've tried to move the majority of static
calculations outside the loop, so there is bit of optimisation there
too.
At the same time this code was updated to support a variable stroke
width for the lines, instead of having them hardcoded to 1 pixel. The
width is now calculated as a fraction of the font size (0.025 "em"),
which is still going to be 1 pixel wide in most typical usage, but will
scale up appropriately if you zoom in far enough.
And in preparation for supporting the SGR strike-through attribute, and
true underlines, I've extended the grid line renderers with options for
handling those line types as well. The offset and thickness of the lines
is obtained from the font metrics (rounded to a pixel width, with a
minimum of one pixel), so they match the style of the font.
VALIDATION
For now we're still only rendering grid lines, and only the top and
bottom lines in the case of the DirectX renderer in Windows Terminal. So
to test, I hacked in some code to force the renderer to use all the
different options, confirming that they were working in both the GDI and
DirectX renderers.
I've tested the output with a number of different fonts, comparing it
with the same text rendered in WordPad. For the most part they match
exactly, but there can be slight differences when we adjust the font
size for grid alignment. And in the case of the GDI renderer, where
we're working with pixel heights rather than points, it's difficult to
match the sizes exactly.
This is a first step towards supporting the strike-through attribute
(#6205) and true underlines (#2915).
Closes #6911
2020-07-31 00:43:37 +02:00
|
|
|
// coordinates designate the center of the line.
|
|
|
|
lineMetrics.underlineOffset += lineMetrics.underlineWidth / 2.0f;
|
Add support for the "doubly underlined" graphic rendition attribute (#7223)
This PR adds support for the ANSI _doubly underlined_ graphic rendition
attribute, which is enabled by the `SGR 21` escape sequence.
There was already an `ExtendedAttributes::DoublyUnderlined` flag in the
`TextAttribute` class, but I needed to add `SetDoublyUnderlined` and
`IsDoublyUnderlined` methods to access that flag, and update the
`SetGraphicsRendition` methods of the two dispatchers to set the
attribute on receipt of the `SGR 21` sequence. I also had to update the
existing `SGR 24` handler to reset _DoublyUnderlined_ in addition to
_Underlined_, since they share the same reset sequence.
For the rendering, I've added a new grid line type, which essentially
just draws an additional line with the same thickness as the regular
underline, but slightly below it - I found a gap of around 0.05 "em"
between the lines looked best. If there isn't enough space in the cell
for that gap, the second line will be clamped to overlap the first, so
you then just get a thicker line. If there isn't even enough space below
for a thicker line, we move the offset _above_ the first line, but just
enough to make it thicker.
The only other complication was the update of the `Xterm256Engine` in
the VT renderer. As mentioned above, the two underline attributes share
the same reset sequence, so to forward that state over conpty we require
a slightly more complicated process than with most other attributes
(similar to _Bold_ and _Faint_). We first check whether either underline
attribute needs to be turned off to send the reset sequence, and then
check individually if each of them needs to be turned back on again.
## Validation Steps Performed
For testing, I've extended the existing attribute tests in
`AdapterTest`, `VTRendererTest`, and `ScreenBufferTests`, to make sure
we're covering both the _Underlined_ and _DoublyUnderlined_ attributes.
I've also manually tested the `SGR 21` sequence in conhost and Windows
Terminal, with a variety of fonts and font sizes, to make sure the
rendering was reasonably distinguishable from a single underline.
Closes #2916
2020-08-10 19:06:16 +02:00
|
|
|
lineMetrics.underlineOffset2 += lineMetrics.underlineWidth / 2.0f;
|
Refactor grid line renderers with support for more line types (#7107)
This is a refactoring of the grid line renderers, adjusting the line
widths to scale with the font size, and optimising the implementation to
cut down on the number of draw calls. It also extends the supported grid
line types to include true underlines and strike-through lines in the
style of the active font.
The main gist of the optimisation was to render the horizontal lines
with a single draw call, instead of a loop with lots of little strokes
joined together. In the case of the vertical lines, which still needed
to be handled in a loop, I've tried to move the majority of static
calculations outside the loop, so there is a bit of optimisation there
too.
At the same time this code was updated to support a variable stroke
width for the lines, instead of having them hardcoded to 1 pixel. The
width is now calculated as a fraction of the font size (0.025 "em"),
which is still going to be 1 pixel wide in most typical usage, but will
scale up appropriately if you zoom in far enough.
And in preparation for supporting the SGR strike-through attribute, and
true underlines, I've extended the grid line renderers with options for
handling those line types as well. The offset and thickness of the lines
is obtained from the font metrics (rounded to a pixel width, with a
minimum of one pixel), so they match the style of the font.
VALIDATION
For now we're still only rendering grid lines, and only the top and
bottom lines in the case of the DirectX renderer in Windows Terminal. So
to test, I hacked in some code to force the renderer to use all the
different options, confirming that they were working in both the GDI and
DirectX renderers.
I've tested the output with a number of different fonts, comparing it
with the same text rendered in WordPad. For the most part they match
exactly, but there can be slight differences when we adjust the font
size for grid alignment. And in the case of the GDI renderer, where
we're working with pixel heights rather than points, it's difficult to
match the sizes exactly.
This is a first step towards supporting the strike-through attribute
(#6205) and true underlines (#2915).
Closes #6911
2020-07-31 00:43:37 +02:00
|
|
|
lineMetrics.strikethroughOffset += lineMetrics.strikethroughWidth / 2.0f;
|
2019-05-03 00:29:04 +02:00
|
|
|
}
|
|
|
|
CATCH_RETURN();
|
|
|
|
|
|
|
|
return S_OK;
|
|
|
|
}
|
|
|
|
|
|
|
|
// Routine Description:
|
|
|
|
// - Helps convert a GDI COLORREF into a Direct2D ColorF
|
|
|
|
// Arguments:
|
|
|
|
// - color - GDI color
|
|
|
|
// Return Value:
|
|
|
|
// - D2D color
|
2019-06-11 22:27:09 +02:00
|
|
|
[[nodiscard]] D2D1_COLOR_F DxEngine::_ColorFFromColorRef(const COLORREF color) noexcept
|
2019-05-03 00:29:04 +02:00
|
|
|
{
|
|
|
|
// Converts BGR color order to RGB.
|
|
|
|
const UINT32 rgb = ((color & 0x0000FF) << 16) | (color & 0x00FF00) | ((color & 0xFF0000) >> 16);
|
|
|
|
|
|
|
|
switch (_chainMode)
|
|
|
|
{
|
|
|
|
case SwapChainMode::ForHwnd:
|
|
|
|
{
|
|
|
|
return D2D1::ColorF(rgb);
|
|
|
|
}
|
|
|
|
case SwapChainMode::ForComposition:
|
|
|
|
{
|
|
|
|
// Get the A value we've snuck into the highest byte
|
|
|
|
const BYTE a = ((color >> 24) & 0xFF);
|
|
|
|
const float aFloat = a / 255.0f;
|
|
|
|
|
|
|
|
return D2D1::ColorF(rgb, aFloat);
|
|
|
|
}
|
|
|
|
default:
|
2019-08-30 00:23:07 +02:00
|
|
|
FAIL_FAST_HR(E_NOTIMPL);
|
2019-05-03 00:29:04 +02:00
|
|
|
}
|
|
|
|
}
|
2019-11-13 19:17:39 +01:00
|
|
|
|
|
|
|
// Routine Description:
|
|
|
|
// - Updates the selection background color of the DxEngine
|
|
|
|
// Arguments:
|
|
|
|
// - color - GDI Color
|
|
|
|
// Return Value:
|
|
|
|
// - N/A
|
2020-08-19 01:11:41 +02:00
|
|
|
void DxEngine::SetSelectionBackground(const COLORREF color, const float alpha) noexcept
|
2019-11-13 19:17:39 +01:00
|
|
|
{
|
|
|
|
_selectionBackground = D2D1::ColorF(GetRValue(color) / 255.0f,
|
|
|
|
GetGValue(color) / 255.0f,
|
|
|
|
GetBValue(color) / 255.0f,
|
2020-08-19 01:11:41 +02:00
|
|
|
alpha);
|
2019-11-13 19:17:39 +01:00
|
|
|
}
|
2020-02-25 23:19:57 +01:00
|
|
|
|
|
|
|
// Routine Description:
|
|
|
|
// - Changes the antialiasing mode of the renderer. This must be called before
|
|
|
|
// _PrepareRenderTarget, otherwise the renderer will default to
|
|
|
|
// D2D1_TEXT_ANTIALIAS_MODE_GRAYSCALE.
|
|
|
|
// Arguments:
|
|
|
|
// - antialiasingMode: a value from the D2D1_TEXT_ANTIALIAS_MODE enum. See:
|
|
|
|
// https://docs.microsoft.com/en-us/windows/win32/api/d2d1/ne-d2d1-d2d1_text_antialias_mode
|
|
|
|
// Return Value:
|
|
|
|
// - N/A
|
|
|
|
void DxEngine::SetAntialiasingMode(const D2D1_TEXT_ANTIALIAS_MODE antialiasingMode) noexcept
|
2020-06-19 23:09:37 +02:00
|
|
|
try
|
2020-02-25 23:19:57 +01:00
|
|
|
{
|
2020-06-19 23:09:37 +02:00
|
|
|
if (_antialiasingMode != antialiasingMode)
|
|
|
|
{
|
|
|
|
_antialiasingMode = antialiasingMode;
|
|
|
|
LOG_IF_FAILED(InvalidateAll());
|
|
|
|
}
|
2020-02-25 23:19:57 +01:00
|
|
|
}
|
2020-06-19 23:09:37 +02:00
|
|
|
CATCH_LOG()
|
2020-04-24 19:16:34 +02:00
|
|
|
|
|
|
|
// Method Description:
|
|
|
|
// - Update our tracker of the opacity of our background. We can only
|
|
|
|
// effectively render cleartype text onto fully-opaque backgrounds. If we're
|
|
|
|
// rendering onto a transparent surface (like acrylic), then cleartype won't
|
|
|
|
// work correctly, and will actually just additively blend with the
|
|
|
|
// background. This is here to support GH#5098.
|
|
|
|
// Arguments:
|
|
|
|
// - opacity: the new opacity of our background, on [0.0f, 1.0f]
|
|
|
|
// Return Value:
|
|
|
|
// - <none>
|
|
|
|
void DxEngine::SetDefaultTextBackgroundOpacity(const float opacity) noexcept
|
2020-06-02 00:29:05 +02:00
|
|
|
try
|
2020-04-24 19:16:34 +02:00
|
|
|
{
|
|
|
|
_defaultTextBackgroundOpacity = opacity;
|
|
|
|
|
|
|
|
// Make sure we redraw all the cells, to update whether they're actually
|
|
|
|
// drawn with cleartype or not.
|
|
|
|
// We don't terribly care if this fails.
|
|
|
|
LOG_IF_FAILED(InvalidateAll());
|
|
|
|
}
|
2020-06-02 00:29:05 +02:00
|
|
|
CATCH_LOG()
|
2020-06-04 14:58:22 +02:00
|
|
|
|
|
|
|
// Method Description:
|
|
|
|
// - Informs this render engine about certain state for this frame at the
|
|
|
|
// beginning of this frame. We'll use it to get information about the cursor
|
|
|
|
// before PaintCursor is called. This enables the DX renderer to draw the
|
|
|
|
// cursor underneath the text.
|
|
|
|
// - This is called every frame. When the cursor is Off or out of frame, the
|
|
|
|
// info's cursorInfo will be set to std::nullopt;
|
|
|
|
// Arguments:
|
|
|
|
// - info - a RenderFrameInfo with information about the state of the cursor in this frame.
|
|
|
|
// Return Value:
|
|
|
|
// - S_OK
|
|
|
|
[[nodiscard]] HRESULT DxEngine::PrepareRenderInfo(const RenderFrameInfo& info) noexcept
|
|
|
|
{
|
2020-06-22 18:13:09 +02:00
|
|
|
_drawingContext->cursorInfo = info.cursorInfo;
|
2020-06-04 14:58:22 +02:00
|
|
|
return S_OK;
|
|
|
|
}
|