Santa Clara U23 vs Benfica U23

Expert Analysis: Santa Clara U23 vs Benfica U23

The upcoming match between Santa Clara U23 and Benfica U23 on December 17, 2025, at 13:00 promises to be an intriguing encounter. The betting figures offer insight into the expected dynamics of the game, and with both clubs running strong youth academies, the fixture could showcase emerging talent. The expert predictions below are based on the provided data; the percentage-style values appear to be probability estimates rather than decimal odds.

Betting Predictions

First Half Predictions

  • Both Teams Not To Score In 1st Half: 76.20
  • Away Team Not To Score In 1st Half: 54.60
  • Home Team Not To Score In 1st Half: 57.80
  • Draw In First Half: 56.70
  • Over 0.5 Goals HT: 56.80
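
Since these values read as probabilities in percent, a useful reference point is the fair decimal odds they imply. The sketch below is a minimal illustration (the helper name fair_decimal_odds is hypothetical, and it assumes the percentages are true probabilities with no bookmaker margin built in):

    # Minimal sketch: convert a probability quoted in percent into
    # fair decimal odds. Assumes the figure is a true probability
    # and that no bookmaker margin applies.
    def fair_decimal_odds(prob_percent: float) -> float:
        if not 0 < prob_percent <= 100:
            raise ValueError("probability must be in (0, 100]")
        return 100.0 / prob_percent

    first_half = {
        "Both Teams Not To Score In 1st Half": 76.20,
        "Draw In First Half": 56.70,
        "Over 0.5 Goals HT": 56.80,
    }

    for market, prob in first_half.items():
        # e.g. 76.20% implies fair odds of about 1.31
        print(f"{market}: {prob}% -> fair odds {fair_decimal_odds(prob):.2f}")

Any price longer than the fair odds would represent positive expected value under these estimates; anything shorter would not.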

Total Match Predictions

  • Over 1.5 Goals: 77.60
  • Avg. Total Goals: 3.35
  • Avg. Goals Scored: 2.03
  • Avg. Goals Conceded: 1.13
  • Sum of Goals (2 or 3): 68.70
  • Away Team To Score In 2nd Half: 68.80
  • Home Team To Score In 2nd Half: 57.70
  • Under 2.5 Goals: 63.30
  • Both Teams Not To Score In 2nd Half: 65.20
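
As a rough consistency check on the averages above, one can assume, purely for illustration, that total match goals follow a Poisson distribution with mean equal to the listed 3.35, and compare the implied over/under probabilities with the listed ones:

    import math

    # Illustrative assumption: total goals ~ Poisson(lam). This is a
    # common simplification, not the model behind the figures above.
    def prob_over(line: float, lam: float) -> float:
        # P(X > line) for a half-goal line, i.e. P(X >= ceil(line))
        k_min = math.ceil(line)
        return 1.0 - sum(math.exp(-lam) * lam**k / math.factorial(k)
                         for k in range(k_min))

    lam = 3.35  # Avg. Total Goals from the list above
    print(f"Poisson P(Over 1.5)  = {prob_over(1.5, lam):.1%}")     # ~84.7%
    print(f"Poisson P(Under 2.5) = {1 - prob_over(2.5, lam):.1%}") # ~34.9%

The single-rate Poisson figures (84.7% for Over 1.5 and 34.9% for Under 2.5, against the listed 77.60 and 63.30) diverge noticeably, which suggests the predictions come from a richer model; the sketch only shows how a goal average and over/under probabilities relate.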