2013 – Access Denied When Saving Web Part Properties

I am using the ResultScriptWebPart web part, I am trying to change a display model by type (dynamic result).
When I run the code with an anonymous user, I get the following error:

Access Denied When Saving Web Part Properties: The Web Part Is Embedded Directly Into The Page Or You Don't Have Sufficient Permissions To Save Properties (Web Part Search Results)

After that I tried to use SPSecurity.RunWithElevatedPrivileges (just to see if it will work) but my code reaches the same error.
Is there a way to give this block of code sufficient permission to run the web part under an anonymous user?

Code:

 /// <summary>
 /// Disables view state and cached client IDs for this web part, then hooks
 /// <see cref="EnhanceQuery"/> into the search data provider so the query and
 /// display templates can be rewritten just before serialization to the client.
 /// </summary>
 /// <param name="e">Standard page lifecycle event arguments.</param>
 protected override void OnLoad(EventArgs e)
 {
     this.ViewStateMode = ViewStateMode.Disabled;
     this.ClearCachedClientID();
     this.ClearChildControlState();
     this.ClearChildState();
     this.ClearChildViewState();
     this.ClearEffectiveClientIDMode();

     // Only attach the handler when the query group and its data provider
     // exist. QueryGroups is a dictionary, so it must be indexed with []
     // (the original text used VB-style parentheses, which does not compile
     // in C#).
     if (this.AppManager != null &&
         this.AppManager.QueryGroups.ContainsKey(this.QueryGroupName) &&
         this.AppManager.QueryGroups[this.QueryGroupName].DataProvider != null)
     {
         this.AppManager.QueryGroups[this.QueryGroupName].DataProvider.BeforeSerializeToClient +=
             new BeforeSerializeToClientEventHandler(EnhanceQuery);
     }

     base.OnLoad(e);
 }
/// <summary>
/// Rewrites the search query and display templates for the current request
/// based on which query-string parameter is present, just before the data
/// provider serializes its state to the client.
/// </summary>
/// <param name="sender">The data provider raising the event.</param>
/// <param name="e">Serialization event arguments (unused).</param>
private void EnhanceQuery(object sender, ScriptWebPart.BeforeSerializeToClientEventArgs e)
{
    RenderClientScript();

    string itemTemplateID = "";
    string query = "";
    string controlTemplate = "";

    // QueryString is a NameValueCollection and must be indexed with [],
    // not called like a method (the original used VB-style parentheses).
    var queryString = HttpContext.Current.Request.QueryString;

    if (!String.IsNullOrEmpty(queryString["equery1"]))
    {
        // Use the equery1 token here — the original referenced
        // {QueryString.query1}, a parameter that is never present when
        // this branch is taken.
        query = "equery1={QueryString.equery1} AND equery2={QueryString.equery2} AND equery3={QueryString.equery3} ContentSource:Clalit_SeferSherutEmp";
        // The original assigned "path.js" and then immediately overwrote it
        // with "path1.js"; the dead first assignment has been removed.
        itemTemplateID = "path1.js";
        controlTemplate = "controlpath1.js";
    }
    else if (!String.IsNullOrEmpty(queryString["query4"]))
    {
        query = "query4:{QueryString.query4}   ContentSource:Clalit_SeferSherutDept";
        itemTemplateID = "itemPath2.js";
        controlTemplate = "controlpath1.js";
    }
    else if (!String.IsNullOrEmpty(queryString["query5"]))
    {
        query = "query5={QueryString.query5}  ContentSource:csMedicine";
        itemTemplateID = "itemPath3.js";
        controlTemplate = "controlPath2.js";
    }

    // NOTE(review): query is built but never applied — presumably it should
    // be assigned to the data provider's QueryTemplate; confirm intent.
    this.RenderTemplateId = controlTemplate;
    this.ItemTemplateId = itemTemplateID;
    this.BypassResultTypes = true;

    // SaveProperties forces the web part to persist its property changes to
    // the page, which is exactly what anonymous users are denied — this line
    // is the source of the "Access Denied When Saving Web Part Properties"
    // error. The per-request overrides above take effect without persisting,
    // so the property must not be set here.
    // this.SaveProperties = true;
}

plotting – How to draw the corresponding part of the projection mesh of this surface

I want to divide the surface f(x_, y_) := (1.4025 - (-0.2 + x)^2 - (-0.2 + y)^2)^0.5 + 0.15 into a mesh corresponding to the subdivision of its region in the xOy plane.

(* Hemisphere-like surface over [0,1]x[0,1] and its partial derivatives.
   NOTE(review): the original post was garbled by translation — Wolfram
   function calls use [...] and Part uses [[...]]; reconstructed here. *)
f[x_, y_] := (1.4025 - (-0.2 + x)^2 - (-0.2 + y)^2)^0.5 + 0.15;
X[x_, y_] := Evaluate@D[f[x, y], x]
Y[x_, y_] := Evaluate@D[f[x, y], y]

(* Four corner points (in the xOy plane, ordered {1,2,4,3} to trace the
   cell boundary) of the m-th cell of an n x n subdivision of the unit
   square. *)
GetRegion[n_, m_] := {TwoD$Points = Outer[List, #, #] &@Subdivide[0, 1, n];
   TwoD$Points = (Flatten[#, 1][[{1, 2, 4, 3}]] & /@
      Flatten[Partition[TwoD$Points, {2, 2}, 1], 1]);
   TwoD$Points[[m]]} // Flatten[#, 1] &

(* Outline of cell m as point pairs: the corner in the base plane (z = 0)
   paired with the corresponding point lifted onto the surface f. *)
GetLine[n_, m_] := {TwoD$Points = Outer[List, #, #] &@Subdivide[0, 1, n];
   TwoD$Points = (Flatten[#, 1][[{1, 2, 4, 3}]] & /@
      Flatten[Partition[TwoD$Points, {2, 2}, 1], 1]);
   ThreeD$Points = Map[Flatten[{#, 0}] &, TwoD$Points, {2}][[m]];
   List[ThreeD$Points,
     Apply[{#1, #2, f[#1, #2]} &, ThreeD$Points, 2]] // Transpose} //
  Flatten[#, 1] &

(* NOTE(review): in the original, NormalLine's body was byte-for-byte
   identical to GetLine — presumably it was meant to compute surface
   normals; kept behaviorally identical by delegating. *)
NormalLine[n_, m_] := GetLine[n, m]


(* Interactive view: shade cell m of the n x n base-plane mesh red, draw
   the tangent plane of f over that cell (blue), and outline the lifted
   cell (pink, dashed). Bracket syntax reconstructed from the garbled
   translation (calls -> [...], Part -> [[...]]). *)
Manipulate[region = GetRegion[n, m];
 Show[Plot3D[{f[x, y], 0}, {x, 0, 1}, {y, 0, 1},
   MeshFunctions -> {#1 &, #2 &}, Mesh -> n - 1,
   MeshShading ->
    ReplacePart[
     ConstantArray[Automatic, {n, n}], {Mod[m, n, 1], Ceiling[m/n]} ->
      Red], PlotRange -> {{0, 1.6}, {-0.2, 1.4}, {0, 1.6}},
   BoxRatios -> Automatic],
  (* Tangent plane at the cell centre: f(c) + fx (x - cx) + fy (y - cy). *)
  Plot3D[X[(region[[1, 1]] + region[[3, 1]])/
       2, (region[[1, 2]] + region[[2, 2]])/
       2] (x - (region[[1, 1]] + region[[3, 1]])/2) +
    Y[(region[[1, 1]] + region[[3, 1]])/
       2, (region[[1, 2]] + region[[2, 2]])/
       2] (y - (region[[1, 2]] + region[[2, 2]])/2) +
    f[(region[[1, 1]] + region[[3, 1]])/
      2, (region[[1, 2]] + region[[2, 2]])/2], {x, region[[1, 1]],
    region[[3, 1]]}, {y, region[[1, 2]], region[[2, 2]]},
   Mesh -> None, PlotStyle -> {Blue, Opacity[0.6]}],
  Graphics3D[{Thick, Dashed, Pink, Line@GetLine[n, m]}]], {{n, 4,
   "Divide quantity"}, 3, 30, 1,
  Appearance -> "Open"}, {{m, 1, "Division number of sub region j"},
  1, n^2, 1, Appearance -> "Open"}]

But I cannot match the jth red mesh cell of the xOy plane with the surface f(x_, y_) := (1.4025 - (-0.2 + x)^2 - (-0.2 + y)^2)^0.5 + 0.15. What should I do to make the corresponding part of the surface red?
enter description of image here

screen – Part of the display does not work, change the display region for macbook pro

Recently, something went wrong with the screen of my 2019 Macbook Pro 13 "model. About 1.5 inches (in length) of my screen went completely black and there are vertical lines on the edges of this black part. It seems to be a hardware problem.

I was wondering if there was anything I could do to make my whole screen appear only in the right part of this black part, until I found time to give my laptop for repair. I know I can just resize my window every time, but I don't want to keep doing it every time I open a new window.

Thank you.

Create a new column as a group as part of pandas data

Hello guys, I need your help to solve the following problem in the pandas data frame. I have a file which contains data as shown here:

enter description of image here

And I want to get the output as follows:

enter description of image here

I appreciate any help / suggestions …. please!

Thank you!

Can you call functions that are not tested as part of the unit test?

So I have "Services" in my system that manage creation, update, etc. Datas.

For example, state_service.create () would create a new state in the database. This state belongs to a group.

The problem is that the group must be created first, before the state can be added.

Now I can always call group_service.create () first, then create the state afterwards, like this:

def test_state_service_create(self):
    # Arrange: a state requires an existing group, so create one through the
    # group service first — this couples the test to group_service.create()
    # working correctly.
    self.group_service.create()
    state_id = self.state_service.create()
    # The service is expected to return a truthy id on success.
    self.assertTrue(state_id)

however, I'm not sure this is bad practice, as it now rests on the correct functioning of group_service.create (). The alternative is to manually create the group myself, for example:

def test_state_service_create(self):
    # Arrange: insert the prerequisite group row directly from mock data,
    # decoupling the test from group_service at the cost of keeping the
    # fixture in sync with schema changes.
    self.db.groups.insert_one(self.mock_groups(0))
    state_id = self.state_service.create()
    # The service is expected to return a truthy id on success.
    self.assertTrue(state_id)

But it would just mean that I would need to update the dummy data if the schema changes.

What is good practice here or is it something else?

unity – how to apply the Image effect only on a specific part of objects?

Using GrabPass

GrabPass is a special type of pass – it captures the content of the screen where the object is about to be drawn in a texture. This texture can be used in the following passes to perform advanced image-based effects.

Just GrabPass {} captures the contents of the current screen in a texture. The texture is accessible in other passes by the name _GrabTexture. Note: this form of grab will perform the expensive screen-grab operation for each object that uses it.

GrabPass {"TextureName"} captures the contents of the current screen in a texture, but will only do this once per frame for the first object that uses the given texture name. The texture can be accessed in other passes by the given texture name. This is a more efficient method when multiple objects use GrabPass in the scene.

GrabPass{ "_GrabTexture" }

Initialize the position and enter the position in the shader:

    // Vertex stage: transform to clip space and compute the screen-space
    // coordinate used to sample the GrabPass texture in the fragment stage.
    v2f vert(appdata_base v) {
        v2f o;
        // use UnityObjectToClipPos from UnityCG.cginc to calculate 
        // the clip-space of the vertex
        o.pos = UnityObjectToClipPos(v.vertex);
        // use ComputeGrabScreenPos function from UnityCG.cginc
        // to get the correct texture coordinate
        o.grabPos = ComputeGrabScreenPos(o.pos);
        return o;
    }

pixelation results:

    float _PixelSize;       // size of one pixelated block, in screen pixels
    sampler2D _GrabTexture; // screen contents captured by GrabPass

    float4 frag(v2f IN) : COLOR
    {
        // Perspective divide gives the normalized screen UV.
        float2 steppedUV = IN.grabPos.xy / IN.grabPos.w;
        // Quantize the UV to a grid of _PixelSize-sized cells:
        // scale so one cell spans one unit, round, then scale back.
        steppedUV /= _PixelSize / _ScreenParams.xy;
        steppedUV = round(steppedUV);
        steppedUV *= _PixelSize / _ScreenParams.xy;
        //Sampling GrabTexture according to screen capture positon, tex2DProj is equivalent to tex2D (grabPos.xy / grabpos.w)
        return tex2D(_GrabTexture, steppedUV);
    }

here is our pixelated shader:

// Custom/Pixelate: captures the screen behind the object with GrabPass and
// redraws it snapped to _PixelSize-sized blocks, pixelating whatever is
// seen through the object's surface.
Shader "Custom/Pixelate"
{
    Properties
    {
        _PixelSize("Pixel Size", Float) = 10
    }

    SubShader
    {
        // Transparent queue: the scene behind the object is already in the
        // frame buffer when GrabPass runs.
        Tags{ "Queue" = "Transparent" "IgnoreProjector" = "True" }
        Blend Off
        Lighting Off
        Fog{ Mode Off }
        ZWrite Off
        LOD 200
        Cull Off

        // Capture the current screen into _GrabTexture; a named grab is
        // performed only once per frame and shared by all users of the name.
        GrabPass{ "_GrabTexture" }

        Pass
        {
            CGPROGRAM

            #pragma vertex vert
            #pragma fragment frag
            #include "UnityCG.cginc"

            // Data passed from the vertex to the fragment stage.
            struct v2f
            {
                float4 pos : SV_POSITION;
                float4 grabPos  : TEXCOORD0;  // screen-space grab coordinate
            };


            v2f vert(appdata_base v) {
                v2f o;
                // use UnityObjectToClipPos from UnityCG.cginc to calculate 
                // the clip-space of the vertex
                o.pos = UnityObjectToClipPos(v.vertex);
                // use ComputeGrabScreenPos function from UnityCG.cginc
                // to get the correct texture coordinate
                o.grabPos = ComputeGrabScreenPos(o.pos);
                return o;
            }

            float _PixelSize;       // block size in screen pixels
            sampler2D _GrabTexture; // screen contents from GrabPass

            float4 frag(v2f IN) : COLOR
            {
                // Perspective divide -> normalized screen UV, then quantize
                // to _PixelSize-sized cells (scale, round, scale back).
                float2 steppedUV = IN.grabPos.xy / IN.grabPos.w;
                steppedUV /= _PixelSize / _ScreenParams.xy;
                steppedUV = round(steppedUV);
                steppedUV *= _PixelSize / _ScreenParams.xy;
                //Sampling GrabTexture according to screen capture positon, tex2DProj is equivalent to tex2D (grabPos.xy / grabpos.w)
                return tex2D(_GrabTexture, steppedUV);
            }

            ENDCG
        }
    }
}

enter description of image here

1. Make a secondary camera

2. On the second camera, in Clear Flags, set it to Don't Clear

enter description of image here
enter description of image here

Using the control pad

we know that the GPU can operate in parallel. But what about communication between CPU and GPU? Does the processor have to wait for the GPU to finish the job before it can receive new commands?
enter description of image here

No! Because this communication would create bottlenecks (for example, when the CPU cannot deliver commands fast enough), make parallel work impossible, and limit how much data can be processed at one time.

The solution is a list where commands can be added by the CPU and read by the GPU – independently of each other! This list is called: Command buffer.

The command buffers contain the list of rendering commands ("define the rendering target, draw the mesh, …"). They can be configured to run at different points during camera rendering (see Camera.AddCommandBuffer), light rendering (see Light.AddCommandBuffer) or run immediately (see Graphics.ExecuteCommandBuffer).

The command buffer allows the CPU and GPU to operate independently of each other. When the CPU wants something to be returned, it can push this command to the queue and when the GPU has free resources, it can remove the command from the list and execute it (but the list works like a FIFO – so the GPU can only take the oldest item from the list (which was added first / earlier than all the others) and work on it).

By the way: there are different possible commands. One example is a draw call, another would be to change the render state.

enter description of image here

https://vulkan-tutorial.com/Drawing_a_triangle/Drawing/Command_buffers

https://developer.nvidia.com/engaging-voyage-vulkan

enter description of image here

Let's start using the command buffer
enter description of image here
Hiding part of the character with a sphere:

 Stencil
     {
         Ref 1
         Comp Always
         Pass Replace
     }

enter description of image here

Create a command buffer:

var commandBuffer = new CommandBuffer();
commandBuffer.name = "pixelate";

enter description of image here

Temporarily copy the rendering result to the rendering texture:

  int tempTextureIdentifier = Shader.PropertyToID("_PostEffect");
        commandBuffer.GetTemporaryRT(tempTextureIdentifier, -1, -1);
        commandBuffer.Blit(BuiltinRenderTextureType.CameraTarget, tempTextureIdentifier);

enter description of image here

Apply the rendering result to the material to which you want to apply image effects:

commandBuffer.Blit(tempTextureIdentifier, BuiltinRenderTextureType.CameraTarget, material);

enter description of image here

Release the temporary rendering texture:

commandBuffer.ReleaseTemporaryRT(tempTextureIdentifier);

enter description of image here

Save where you want to add the command buffer
Below is a high-level overview of how cameras use the forward or delayed pipeline to render a scene in Unity.

  • The black boxes represent an internal Unity process. The blue boxes represent
  • a CameraEvent where you can add control buffers.

enter description of image here

camera.AddCommandBuffer(CameraEvent.BeforeImageEffects, commandBuffer);

enter description of image here

add image effects where Ref is equal to 1

   Stencil
    {
        Ref 1
        Comp Equal
    }

enter description of image here

adding color to the pixelated area

enter description of image here

More examples!

pixelation of specific objects

enter description of image here

grayscale where this is not equal to Ref 1

Stencil
{
    Ref 1
    Comp NotEqual
}

enter description of image here

The references:

Render Hell – Book I

【Unity】CommandBuffer とステンシルバッファで特定のモデルにモザイクをかけてみる

dnd 5e – How does the random play part of the divination spell work?

It's a DM call, but Augury can serve as an example

The "random" part seems to be the same text from Augury:

If you cast the spell two or more times before ending your next long rest, there is a 25% cumulative chance for each cast after the first that you get a random reading. The DM makes this roll in secret.

In the case of Augury, the intention seems clearer: there are four possible outcomes, so if a random read occurs, a DM will probably roll at random for one of the four options:

  1. Weal, for good results
  2. Woe, for bad results
  3. Good and bad for good and bad results
  4. Nothing, for results that are not particularly good or bad

However, Divination (despite using the same text) does not have a finite list of results. As a DM, personally, I could still use the Augury results as an example (and run a d4, describing results similar to the above). Or I can run a d20 and get results: higher = better for PC, lower = worse for PC (I use this method to lot things). Or you can just use any short phrase, cryptic rhyme or omen that comes to mind (which makes it even easier for DM than a normal divination result).

To be complete (that is, you did not ask for it, but others would like to know), "25% cumulative chance for each casting after the first" means:

  1. First casting: 0% chance of random reading
  2. Second casting: 25% chance of random reading
  3. Third casting: 50% chance of random reading
  4. Fourth casting: 75% chance of random reading
  5. Fifth casting +: 100% chance of random reading

applications – How to integrate chromium[-like] browser as part of the Android app?

Our application is built on Qt-C ++ which acts as a backend. For the front-end, we invoke the browser command in a given platform (Windows, Mac, Linux). Now browser Javascript and backend C ++ communicate via WebSockets. All the rendering goes well and the application works well.
(Note: it turns out that Qt's web engine is not a good option due to its size and rigidity.)

However, in the case of mobile apps, there are few concerns.
– We cannot invoke the Chrome browser as we do in Desktops, because the main application goes in the background and puts in the foreground the "Google Chrome" application (browser)
– The default choice is Webview, but there are few limitations as shown here

Our application is not heavy in rendering. Although it requires video and webrtc playback (which is available with some of the Android web views).

What is the best solution when the Android app requires the functionality of the Google Chrome browser app?

sharepoint online – Add links to a Quick Links Web Part using PowerShell and JSON

I am trying to add links to a Quick Links Web Part on a modern SharePoint site; to do this I use PowerShell and JSON. I got the web part as a JSON file and accessed it using the Get-Content cmdlet.

The JSON looks like this:

{
        "controlType": 3,
        "id": "a9ed7796-5545-4623-a943-5be42762691d",
        "position": {
            "zoneIndex": 1,
            "sectionIndex": 1,
            "controlIndex": 1,
            "layoutIndex": 1
        },
        "webPartId": "c70391ea-0b10-4ee9-b2b4-006d3fcad0cd",
        "webPartData": {
            "id": "c70391ea-0b10-4ee9-b2b4-006d3fcad0cd",
            "instanceId": "a9ed7796-5545-4623-a943-5be42762691d",
            "title": "Quick links",
            "description": "Add links to important documents and pages.",
            "serverProcessedContent": {
                "htmlStrings": {},
                "searchablePlainTexts": {
                    "items[0].title": "Yahoo",
                    "items[1].title": "Google"
                },
                "imageSources": {
                    "items[0].rawPreviewImageUrl": "https://s.yimg.com/cv/apiv2/social/images/yahoo_default_logo.png"
                },
                "links": {
                    "baseUrl": "https://bbpocoutlook.sharepoint.com/sites/tl23",
                    "items[0].sourceItem.url": "https://yahoo.com",
                    "items[1].sourceItem.url": "https://google.com"
                },
                "componentDependencies": {
                    "layoutComponentId": "706e33c8-af37-4e7b-9d22-6e5694d92a6f"
                }
            },
            "dataVersion": "2.2",
            "properties": {
                "items": [
                    {
                        "sourceItem": {
                            "itemType": 2,
                            "fileExtension": "",
                            "progId": ""
                        },
                        "thumbnailType": 3,
                        "id": 2,
                        "description": "",
                        "altText": ""
                    },
                    {
                        "sourceItem": {
                            "itemType": 2,
                            "fileExtension": "",
                            "progId": ""
                        },
                        "thumbnailType": 3,
                        "id": 1,
                        "description": "",
                        "altText": ""
                    }
                ],
                "isMigrated": true,
                "layoutId": "List",
                "shouldShowThumbnail": true,
                "buttonLayoutOptions": {
                    "showDescription": false,
                    "buttonTreatment": 2,
                    "iconPositionType": 2,
                    "textAlignmentVertical": 2,
                    "textAlignmentHorizontal": 2,
                    "linesOfText": 2
                },
                "listLayoutOptions": {
                    "showDescription": false,
                    "showIcon": true
                },
                "waffleLayoutOptions": {
                    "iconSize": 1,
                    "onlyShowThumbnail": false
                },
                "hideWebPartWhenEmpty": true,
                "dataProviderId": "QuickLinks",
                "webId": "b5fdf80c-54ce-410f-a50d-910ea2e33250",
                "siteId": "0c8f4c9a-71e6-4fc0-8355-9b52f0a7eb3a"
            }
        },
        "emphasis": {},
        "reservedHeight": 132,
        "reservedWidth": 744,
        "addedFromPersistedData": true
    }

How do I add a new item to the Web Part and add a link to it?

amazon web services – Prevents Cloudfront from transferring part of the way to the origin server

Context:
I have an S3 Bucket (Origin 1) which serves as a static website under the domain example.com using Cloudfront.

Goal:

In addition, I want example.com/subfolder to serve content from second.com (Origin 2), so that the following is true: example.com/subfolder = second.com

Currently:

Under the Cloudfront distribution, I configured Origin 1 with behavior Default (*)
and Origin 2 with behavior /subfolder*

Problem:

Going to example.com/subfolder I am served second.com/subfolder

Q:
How and where do I adjust the CloudFront behavior so that it does not forward the first part of the URL path to the origin?